Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 769f925..87f4d05 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -130,8 +130,6 @@
 					<mailto:zapman@interlan.net>
 'i'	00-3F	linux/i2o.h
 'j'	00-3F	linux/joystick.h
-'k'	all	asm-sparc/kbio.h
-		asm-sparc64/kbio.h
 'l'	00-3F	linux/tcfs_fs.h		transparent cryptographic file system
 					<http://mikonos.dia.unisa.it/tcfs>
 'l'	40-7F	linux/udf_fs_i.h	in development:
diff --git a/Documentation/sharedsubtree.txt b/Documentation/sharedsubtree.txt
new file mode 100644
index 0000000..2d8f403
--- /dev/null
+++ b/Documentation/sharedsubtree.txt
@@ -0,0 +1,1060 @@
+Shared Subtrees
+---------------
+
+Contents:
+	1) Overview
+	2) Features
+	3) smount command
+	4) Use-case
+	5) Detailed semantics
+	6) Quiz
+	7) FAQ
+	8) Implementation
+
+
+1) Overview
+-----------
+
+Consider the following situation:
+
+A process wants to clone its own namespace, but still wants to access the CD
+that got mounted recently.  Shared subtree semantics provide the necessary
+mechanism to accomplish the above.
+
+It also provides the necessary building blocks for features like per-user
+namespaces and versioned filesystems.
+
+2) Features
+-----------
+
+Shared subtrees provide four different flavors of mounts (four flavors of
+struct vfsmount, to be precise):
+
+	a. shared mount
+	b. slave mount
+	c. private mount
+	d. unbindable mount
+
+
+2a) A shared mount can be replicated to as many mountpoints as desired, and
+all the replicas continue to be exactly the same.
+
+	Here is an example:
+
+	Let's say /mnt has a mount that is shared.
+	mount --make-shared /mnt
+
+	note: the mount command does not yet support the --make-shared flag.
+	I have included a small C program below which does the same; execute
+	'smount /mnt shared'
+
+	#mount --bind /mnt /tmp
+	The above command replicates the mount at /mnt to the mountpoint /tmp
+	and the contents of both the mounts remain identical.
+
+	#ls /mnt
+	a b c
+
+	#ls /tmp
+	a b c
+
+	Now let's say we mount a device at /tmp/a
+	#mount /dev/sd0  /tmp/a
+
+	#ls /tmp/a
+	t1 t2 t3
+
+	#ls /mnt/a
+	t1 t2 t3
+
+	Note that the mount has propagated to the mount at /mnt as well.
+
+	And the same is true even when /dev/sd0 is mounted on /mnt/a. The
+	contents will be visible under /tmp/a too.
+
+
+2b) A slave mount is like a shared mount except that mount and umount events
+	only propagate towards it.
+
+	All slave mounts have a master mount, which is a shared mount.
+
+	Here is an example:
+
+	Let's say /mnt has a mount which is shared.
+	#mount --make-shared /mnt
+
+	Let's bind mount /mnt to /tmp
+	#mount --bind /mnt /tmp
+
+	The new mount at /tmp becomes a shared mount and is a replica of
+	the mount at /mnt.
+
+	Now let's make the mount at /tmp a slave of /mnt
+	#mount --make-slave /tmp
+	[or smount /tmp slave]
+
+	Let's mount /dev/sd0 on /mnt/a
+	#mount /dev/sd0 /mnt/a
+
+	#ls /mnt/a
+	t1 t2 t3
+
+	#ls /tmp/a
+	t1 t2 t3
+
+	Note the mount event has propagated to the mount at /tmp
+
+	However, let's see what happens if we mount something on the mount at /tmp
+
+	#mount /dev/sd1 /tmp/b
+
+	#ls /tmp/b
+	s1 s2 s3
+
+	#ls /mnt/b
+
+	Note how the mount event has not propagated to the mount at
+	/mnt
+
+
+2c) A private mount does not forward or receive propagation.
+
+	This is the mount we are familiar with. It is the default type.
+
+
+2d) An unbindable mount is an unbindable private mount.
+
+	Let's say we have a mount at /mnt and we make it unbindable.
+
+	#mount --make-unbindable /mnt
+	 [ smount /mnt  unbindable ]
+
+	 Let's try to bind mount this mount somewhere else.
+	 # mount --bind /mnt /tmp
+	 mount: wrong fs type, bad option, bad superblock on /mnt,
+	        or too many mounted file systems
+
+	Binding an unbindable mount is an invalid operation.
+
+
+3) smount command
+
+	Currently the mount command is not aware of the shared subtree features.
+	Work is in progress to add support to mount (util-linux package).
+	Until then, use the following program.
+
+	------------------------------------------------------------------------
+	//
+	//this code was developed by Miklos Szeredi <miklos@szeredi.hu>
+	//and modified by Ram Pai <linuxram@us.ibm.com>
+	// sample usage:
+	//              smount /tmp shared
+	//
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <string.h>
+	#include <unistd.h>
+	#include <sys/mount.h>
+	#include <sys/fsuid.h>
+
+	#ifndef MS_REC
+	#define MS_REC		0x4000	/* 16384: Recursive loopback */
+	#endif
+
+	#ifndef MS_SHARED
+	#define MS_SHARED		(1<<20)	/* Shared */
+	#endif
+
+	#ifndef MS_PRIVATE
+	#define MS_PRIVATE		(1<<18)	/* Private */
+	#endif
+
+	#ifndef MS_SLAVE
+	#define MS_SLAVE		(1<<19)	/* Slave */
+	#endif
+
+	#ifndef MS_UNBINDABLE
+	#define MS_UNBINDABLE		(1<<17)	/* Unbindable */
+	#endif
+
+	int main(int argc, char *argv[])
+	{
+		int type;
+		if(argc != 3) {
+			fprintf(stderr, "usage: %s dir "
+			"<rshared|rslave|rprivate|runbindable|shared|slave"
+			"|private|unbindable>\n" , argv[0]);
+			return 1;
+		}
+
+		fprintf(stdout, "%s %s %s\n", argv[0], argv[1], argv[2]);
+
+		if (strcmp(argv[2],"rshared")==0)
+			type=(MS_SHARED|MS_REC);
+		else if (strcmp(argv[2],"rslave")==0)
+			type=(MS_SLAVE|MS_REC);
+		else if (strcmp(argv[2],"rprivate")==0)
+			type=(MS_PRIVATE|MS_REC);
+		else if (strcmp(argv[2],"runbindable")==0)
+			type=(MS_UNBINDABLE|MS_REC);
+		else if (strcmp(argv[2],"shared")==0)
+			type=MS_SHARED;
+		else if (strcmp(argv[2],"slave")==0)
+			type=MS_SLAVE;
+		else if (strcmp(argv[2],"private")==0)
+			type=MS_PRIVATE;
+		else if (strcmp(argv[2],"unbindable")==0)
+			type=MS_UNBINDABLE;
+		else {
+			fprintf(stderr, "invalid operation: %s\n", argv[2]);
+			return 1;
+		}
+		setfsuid(getuid());
+
+		if(mount("", argv[1], "dontcare", type, "") == -1) {
+			perror("mount");
+			return 1;
+		}
+		return 0;
+	}
+	-----------------------------------------------------------------------
+
+	Copy the above code snippet into smount.c
+	gcc -o smount smount.c
+
+
+	(i) To mark all the mounts under /mnt as shared execute the following
+	command:
+
+	 	smount /mnt rshared
+		the corresponding syntax planned for mount command is
+		mount --make-rshared /mnt
+
+	    just to mark a mount /mnt as shared, execute the following
+	    command:
+	 	smount /mnt shared
+		the corresponding syntax planned for mount command is
+		mount --make-shared /mnt
+
+	(ii) To mark all the shared mounts under /mnt as slave execute the
+	following command:
+
+		smount /mnt rslave
+		the corresponding syntax planned for mount command is
+		mount --make-rslave /mnt
+
+	    just to mark a mount /mnt as slave, execute the following
+	    command:
+	 	smount /mnt slave
+		the corresponding syntax planned for mount command is
+		mount --make-slave /mnt
+
+	(iii) To mark all the mounts under /mnt as private execute the
+	following command:
+
+		smount /mnt rprivate
+		the corresponding syntax planned for mount command is
+		mount --make-rprivate /mnt
+
+	    just to mark a mount /mnt as private, execute the following
+	    command:
+	 	smount /mnt private
+		the corresponding syntax planned for mount command is
+		mount --make-private /mnt
+
+	      NOTE: by default all the mounts are created as private. But if
+	      you want to change some shared/slave/unbindable mount to
+	      private at a later point in time, this command can help.
+
+	(iv) To mark all the mounts under /mnt as unbindable execute the
+	following command:
+
+		smount /mnt runbindable
+		the corresponding syntax planned for mount command is
+		mount --make-runbindable /mnt
+
+	    just to mark a mount /mnt as unbindable, execute the following
+	    command:
+	 	smount /mnt unbindable
+		the corresponding syntax planned for mount command is
+		mount --make-unbindable /mnt
+
+
+4) Use cases
+------------
+
+	A) A process wants to clone its own namespace, but still wants to
+	   access the CD that got mounted recently.
+
+	   Solution:
+
+		The system administrator can make the mount at /cdrom shared
+		mount --bind /cdrom /cdrom
+		mount --make-shared /cdrom
+
+		Now any process that clones off a new namespace will have a
+		mount at /cdrom which is a replica of the same mount in the
+		parent namespace.
+
+		So when a CD is inserted and mounted at /cdrom that mount gets
+		propagated to the other mount at /cdrom in all the other clone
+		namespaces.
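+
+		As a rough, hypothetical sketch (the /cdrom path, the 64KB
+		stack and the use of ls are just illustrations, not part of
+		the original example), a process can clone its namespace with
+		clone(2) and CLONE_NEWNS and still see the propagated mount:
+
+		#define _GNU_SOURCE
+		#include <sched.h>
+		#include <signal.h>
+		#include <stdio.h>
+		#include <sys/types.h>
+		#include <sys/wait.h>
+		#include <unistd.h>
+
+		static char stack[64 * 1024];
+
+		static int child(void *arg)
+		{
+			/* runs in the new namespace; /cdrom here is a
+			   replica of the parent's shared /cdrom mount */
+			execlp("ls", "ls", "/cdrom", (char *)NULL);
+			return 1;
+		}
+
+		int main(void)
+		{
+			/* CLONE_NEWNS requires CAP_SYS_ADMIN; pass the top
+			   of the stack since it grows downwards here */
+			pid_t pid = clone(child, stack + sizeof(stack),
+					  CLONE_NEWNS | SIGCHLD, NULL);
+			if (pid == -1) {
+				perror("clone");
+				return 1;
+			}
+			waitpid(pid, NULL, 0);
+			return 0;
+		}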
+
+	B) A process wants its mounts invisible to any other process, but
+	still wants to see the other system mounts.
+
+	   Solution:
+
+		To begin with, the administrator can mark the entire mount tree
+		as shared.
+
+		mount --make-rshared /
+
+		A new process can clone off a new namespace and mark some part
+		of its namespace as slave:
+
+		mount --make-rslave /myprivatetree
+
+		Henceforth, any mounts done by the process within
+		/myprivatetree will not show up in any other namespace.
+		However, mounts done in the parent namespace under
+		/myprivatetree still show up in the process's namespace.
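+
+		As a hedged sketch (the path is hypothetical), the per-process
+		step above is the "rslave" case of the smount program from
+		section 3, executed from inside the cloned namespace:
+
+			mount("", "/myprivatetree", "dontcare",
+			      MS_SLAVE | MS_REC, "");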
+
+
+	Apart from the above semantics this feature provides the
+	building blocks to solve the following problems:
+
+	C)  Per-user namespace
+
+		The above semantics allow a way to share mounts across
+		namespaces.  But namespaces are associated with processes. If
+		namespaces are made first-class objects with a user API to
+		associate/disassociate a namespace with a userid, then each
+		user could have his/her own namespace and tailor it to his/her
+		requirements. Of course, this needs support from PAM.
+
+	D)  Versioned files
+
+		If the entire mount tree is visible at multiple locations, then
+		an underlying versioning filesystem can return a different
+		version of the file depending on the path used to access that
+		file.
+
+		An example is:
+
+		mount --make-shared /
+		mount --rbind / /view/v1
+		mount --rbind / /view/v2
+		mount --rbind / /view/v3
+		mount --rbind / /view/v4
+
+		and if /usr has a versioning filesystem mounted, then that
+		mount appears at /view/v1/usr, /view/v2/usr, /view/v3/usr and
+		/view/v4/usr too
+
+		A user can request v3 version of the file /usr/fs/namespace.c
+		by accessing /view/v3/usr/fs/namespace.c . The underlying
+		versioning filesystem can then decipher that v3 version of the
+		filesystem is being requested and return the corresponding
+		inode.
+
+5) Detailed semantics
+---------------------
+	The section below explains the detailed semantics of
+	bind, rbind, move, mount, umount and clone-namespace operations.
+
+	Note: the word 'vfsmount' and the noun 'mount' have been used
+	to mean the same thing, throughout this document.
+
+5a) Mount states
+
+	A given mount can be in one of the following states
+	1) shared
+	2) slave
+	3) shared and slave
+	4) private
+	5) unbindable
+
+	A 'propagation event' is defined as an event generated on a vfsmount
+	that leads to mount or unmount actions in other vfsmounts.
+
+	A 'peer group' is defined as a group of vfsmounts that propagate
+	events to each other.
+
+	(1) Shared mounts
+
+		A 'shared mount' is defined as a vfsmount that belongs to a
+		'peer group'.
+
+		For example:
+			mount --make-shared /mnt
+			mount --bind /mnt /tmp
+
+		The mount at /mnt and that at /tmp are both shared and belong
+		to the same peer group. Anything mounted or unmounted under
+		/mnt or /tmp is reflected in all the other mounts of its peer
+		group.
+
+
+	(2) Slave mounts
+
+		A 'slave mount' is defined as a vfsmount that receives
+		propagation events and does not forward propagation events.
+
+		A slave mount, as the name implies, has a master mount from
+		which mount/unmount events are received. Events do not
+		propagate from the slave mount to the master.  Only a shared
+		mount can be made a slave by executing the following command
+
+			mount --make-slave mount
+
+		A shared mount that is made a slave is no longer shared unless
+		it is modified to become shared again.
+
+	(3) Shared and Slave
+
+		A vfsmount can be both shared and slave.  This state
+		indicates that the mount is a slave of some vfsmount, and
+		has its own peer group too.  This vfsmount receives propagation
+		events from its master vfsmount, and also forwards propagation
+		events to its 'peer group' and to its slave vfsmounts.
+
+		Strictly speaking, the vfsmount is shared having its own
+		peer group, and this peer-group is a slave of some other
+		peer group.
+
+		Only a slave vfsmount can be made 'shared and slave' by
+		either executing the following command
+			mount --make-shared mount
+		or by moving the slave vfsmount under a shared vfsmount.
+
+	(4) Private mount
+
+		A 'private mount' is defined as a vfsmount that does not
+		receive or forward any propagation events.
+
+	(5) Unbindable mount
+
+		An 'unbindable mount' is defined as a vfsmount that does not
+		receive or forward any propagation events and cannot
+		be bind mounted.
+
+
+   	State diagram:
+   	The state diagram below explains the state transition of a mount,
+	in response to various commands.
+	------------------------------------------------------------------------
+	|             |make-shared |  make-slave  | make-private |make-unbindab|
+	--------------|------------|--------------|--------------|-------------|
+	|shared	      |shared	   |*slave/private|   private	 | unbindable  |
+	|             |            |              |              |             |
+	|-------------|------------|--------------|--------------|-------------|
+	|slave	      |shared      |	**slave	  |    private   | unbindable  |
+	|             |and slave   |              |              |             |
+	|-------------|------------|--------------|--------------|-------------|
+	|shared	      |shared      |    slave	  |    private   | unbindable  |
+	|and slave    |and slave   |              |              |             |
+	|-------------|------------|--------------|--------------|-------------|
+	|private      |shared	   |  **private	  |    private   | unbindable  |
+	|-------------|------------|--------------|--------------|-------------|
+	|unbindable   |shared	   |**unbindable  |    private   | unbindable  |
+	------------------------------------------------------------------------
+
+	* if the shared mount is the only mount in its peer group, making it
+	a slave makes it private automatically, since there is no master to
+	which it can be slaved.
+
+	** slaving a non-shared mount has no effect on the mount.
+
+	Apart from the commands listed above, the 'move' operation also changes
+	the state of a mount depending on the type of the destination mount. It
+	is explained in section 5d.
+
+5b) Bind semantics
+
+	Consider the following command
+
+	mount --bind A/a  B/b
+
+	where 'A' is the source mount, 'a' is the dentry in the mount 'A', 'B'
+	is the destination mount and 'b' is the dentry in the destination mount.
+
+	The outcome depends on the type of mount of 'A' and 'B'. The table
+	below is a quick reference.
+   ---------------------------------------------------------------------------
+   |         BIND MOUNT OPERATION                                            |
+   |**************************************************************************
+   |source(A)->| shared       |       private  |       slave    | unbindable |
+   | dest(B)  |               |                |                |            |
+   |   |      |               |                |                |            |
+   |   v      |               |                |                |            |
+   |**************************************************************************
+   |  shared  | shared        |     shared     | shared & slave |  invalid   |
+   |          |               |                |                |            |
+   |non-shared| shared        |      private   |      slave     |  invalid   |
+   ***************************************************************************
+
+     	Details:
+
+	1. 'A' is a shared mount and 'B' is a shared mount. A new mount 'C'
+	which is clone of 'A', is created. Its root dentry is 'a' . 'C' is
+	mounted on mount 'B' at dentry 'b'. Also new mount 'C1', 'C2', 'C3' ...
+	are created and mounted at the dentry 'b' on all mounts where 'B'
+	propagates to. A new propagation tree containing 'C1',..,'Cn' is
+	created. This propagation tree is identical to the propagation tree of
+	'B'.  And finally the peer-group of 'C' is merged with the peer group
+	of 'A'.
+
+	2. 'A' is a private mount and 'B' is a shared mount. A new mount 'C'
+	which is clone of 'A', is created. Its root dentry is 'a'. 'C' is
+	mounted on mount 'B' at dentry 'b'. Also new mount 'C1', 'C2', 'C3' ...
+	are created and mounted at the dentry 'b' on all mounts where 'B'
+	propagates to. A new propagation tree is set containing all new mounts
+	'C', 'C1', .., 'Cn' with exactly the same configuration as the
+	propagation tree for 'B'.
+
+	3. 'A' is a slave mount of mount 'Z' and 'B' is a shared mount. A new
+	mount 'C' which is clone of 'A', is created. Its root dentry is 'a' .
+	'C' is mounted on mount 'B' at dentry 'b'. Also new mounts 'C1', 'C2',
+	'C3' ... are created and mounted at the dentry 'b' on all mounts where
+	'B' propagates to. A new propagation tree containing the new mounts
+	'C','C1',..  'Cn' is created. This propagation tree is identical to the
+	propagation tree for 'B'. And finally the mount 'C' and its peer group
+	is made the slave of mount 'Z'.  In other words, mount 'C' is in the
+	state 'slave and shared'.
+
+	4. 'A' is an unbindable mount and 'B' is a shared mount. This is an
+	invalid operation.
+
+	5. 'A' is a private mount and 'B' is a non-shared(private or slave or
+	unbindable) mount. A new mount 'C' which is clone of 'A', is created.
+	Its root dentry is 'a'. 'C' is mounted on mount 'B' at dentry 'b'.
+
+	6. 'A' is a shared mount and 'B' is a non-shared mount. A new mount 'C'
+	which is a clone of 'A' is created. Its root dentry is 'a'. 'C' is
+	mounted on mount 'B' at dentry 'b'.  'C' is made a member of the
+	peer-group of 'A'.
+
+	7. 'A' is a slave mount of mount 'Z' and 'B' is a non-shared mount. A
+	new mount 'C' which is a clone of 'A' is created. Its root dentry is
+	'a'.  'C' is mounted on mount 'B' at dentry 'b'. Also 'C' is set as a
+	slave mount of 'Z'. In other words 'A' and 'C' are both slave mounts of
+	'Z'.  All mount/unmount events on 'Z' propagate to 'A' and 'C'. But
+	mount/unmount events on 'A' do not propagate anywhere else. Similarly,
+	mount/unmount events on 'C' do not propagate anywhere else.
+
+	8. 'A' is an unbindable mount and 'B' is a non-shared mount. This is an
+	invalid operation. An unbindable mount cannot be bind mounted.
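+
+	As a small, hypothetical sketch (the paths are only illustrations),
+	the bind operation in the table above corresponds to mount(2) with
+	MS_BIND; which of the eight cases applies depends on the propagation
+	types of the mounts containing the source and the destination:
+
+	#include <stdio.h>
+	#include <sys/mount.h>
+
+	int main(void)
+	{
+		/* equivalent of "mount --bind A/a B/b" with A/a = /mnt/a
+		   and B/b = /tmp/b */
+		if (mount("/mnt/a", "/tmp/b", NULL, MS_BIND, NULL) == -1) {
+			perror("mount");
+			return 1;
+		}
+		return 0;
+	}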
+
+5c) Rbind semantics
+
+	rbind is the same as bind, except that bind replicates only the
+	specified mount while rbind replicates all the mounts in the tree
+	belonging to the specified mount. rbind is the bind operation applied
+	to all the mounts in the tree.
+
+	If the source tree that is rbound has some unbindable mounts, then
+	the subtree under each unbindable mount is pruned in the new
+	location.
+
+	e.g. let's say we have the following mount tree.
+
+		A
+	      /   \
+	      B   C
+	     / \ / \
+	     D E F G
+
+	     Let's say all the mounts in the tree except mount C are
+	     of a type other than unbindable.
+
+	     If this tree is rbound to, say, Z, we will have the following
+	     tree at the new location.
+
+		Z
+		|
+		A'
+	       /
+	      B'		Note how the tree under C is pruned
+	     / \ 		in the new location.
+	    D' E'
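+
+	Programmatically, and as a hypothetical sketch, rbind is the same
+	mount(2) call as in the bind sketch of section 5b, only with the
+	MS_REC flag added (paths again chosen only for illustration):
+
+		mount("/A", "/Z", NULL, MS_BIND | MS_REC, NULL);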
+
+
+
+5d) Move semantics
+
+	Consider the following command
+
+	mount --move A  B/b
+
+	where 'A' is the source mount, 'B' is the destination mount and 'b' is
+	the dentry in the destination mount.
+
+	The outcome depends on the type of the mount of 'A' and 'B'. The table
+	below is a quick reference.
+   ---------------------------------------------------------------------------
+   |         		MOVE MOUNT OPERATION                                 |
+   |**************************************************************************
+   | source(A)->| shared      |       private  |       slave    | unbindable |
+   | dest(B)  |               |                |                |            |
+   |   |      |               |                |                |            |
+   |   v      |               |                |                |            |
+   |**************************************************************************
+   |  shared  | shared        |     shared     |shared and slave|  invalid   |
+   |          |               |                |                |            |
+   |non-shared| shared        |      private   |    slave       | unbindable |
+   ***************************************************************************
+	NOTE: moving a mount residing under a shared mount is invalid.
+
+      Details follow:
+
+	1. 'A' is a shared mount and 'B' is a shared mount.  The mount 'A' is
+	mounted on mount 'B' at dentry 'b'.  Also new mounts 'A1', 'A2'...'An'
+	are created and mounted at dentry 'b' on all mounts that receive
+	propagation from mount 'B'. A new propagation tree is created in the
+	exact same configuration as that of 'B'. This new propagation tree
+	contains all the new mounts 'A1', 'A2'...  'An'.  And this new
+	propagation tree is appended to the already existing propagation tree
+	of 'A'.
+
+	2. 'A' is a private mount and 'B' is a shared mount. The mount 'A' is
+	mounted on mount 'B' at dentry 'b'. Also new mounts 'A1', 'A2'... 'An'
+	are created and mounted at dentry 'b' on all mounts that receive
+	propagation from mount 'B'. The mount 'A' becomes a shared mount and a
+	propagation tree is created which is identical to that of
+	'B'. This new propagation tree contains all the new mounts 'A1',
+	'A2'...  'An'.
+
+	3. 'A' is a slave mount of mount 'Z' and 'B' is a shared mount.  The
+	mount 'A' is mounted on mount 'B' at dentry 'b'.  Also new mounts 'A1',
+	'A2'... 'An' are created and mounted at dentry 'b' on all mounts that
+	receive propagation from mount 'B'. A new propagation tree is created
+	in the exact same configuration as that of 'B'. This new propagation
+	tree contains all the new mounts 'A1', 'A2'...  'An'.  And this new
+	propagation tree is appended to the already existing propagation tree of
+	'A'.  Mount 'A' continues to be the slave mount of 'Z' but it also
+	becomes 'shared'.
+
+	4. 'A' is an unbindable mount and 'B' is a shared mount. The operation
+	is invalid, because mounting anything on the shared mount 'B' can
+	create new mounts that get mounted on the mounts that receive
+	propagation from 'B', and since the mount 'A' is unbindable, cloning
+	it to mount at other mountpoints is not possible.
+
+	5. 'A' is a private mount and 'B' is a non-shared(private or slave or
+	unbindable) mount. The mount 'A' is mounted on mount 'B' at dentry 'b'.
+
+	6. 'A' is a shared mount and 'B' is a non-shared mount.  The mount 'A'
+	is mounted on mount 'B' at dentry 'b'.  Mount 'A' continues to be a
+	shared mount.
+
+	7. 'A' is a slave mount of mount 'Z' and 'B' is a non-shared mount.
+	The mount 'A' is mounted on mount 'B' at dentry 'b'.  Mount 'A'
+	continues to be a slave mount of mount 'Z'.
+
+	8. 'A' is an unbindable mount and 'B' is a non-shared mount. The mount
+	'A' is mounted on mount 'B' at dentry 'b'. Mount 'A' continues to be an
+	unbindable mount.
+
+5e) Mount semantics
+
+	Consider the following command
+
+	mount device  B/b
+
+	'B' is the destination mount and 'b' is the dentry in the destination
+	mount.
+
+	The above operation is the same as bind operation with the exception
+	that the source mount is always a private mount.
+
+
+5f) Unmount semantics
+
+	Consider the following command
+
+	umount A
+
+	where 'A' is a mount mounted on mount 'B' at dentry 'b'.
+
+	If mount 'B' is shared, then all most-recently-mounted mounts at dentry
+	'b' on mounts that receive propagation from mount 'B' and do not have
+	sub-mounts within them are unmounted.
+
+	Example: Let's say 'B1', 'B2', 'B3' are shared mounts that propagate to
+	each other.
+
+	Let's say 'A1', 'A2', 'A3' are first mounted at dentry 'b' on mounts
+	'B1', 'B2' and 'B3' respectively.
+
+	Let's say 'C1', 'C2', 'C3' are next mounted at the same dentry 'b' on
+	mounts 'B1', 'B2' and 'B3' respectively.
+
+	If 'C1' is unmounted, all the mounts that are most recently mounted on
+	'B1' and on the mounts that 'B1' propagates to are unmounted.
+
+	'B1' propagates to 'B2' and 'B3'. And the most recently mounted mount
+	on 'B2' at dentry 'b' is 'C2', and that of mount 'B3' is 'C3'.
+
+	So all 'C1', 'C2' and 'C3' should be unmounted.
+
+	If any of 'C2' or 'C3' has some child mounts, then that mount is not
+	unmounted, but all other mounts are unmounted. However, if 'C1' is
+	told to be unmounted and 'C1' has some sub-mounts, the umount
+	operation fails entirely.
+
+5g) Clone Namespace
+
+	A cloned namespace contains the same mounts as the parent
+	namespace.
+
+	Let's say 'A' and 'B' are the corresponding mounts in the parent and the
+	child namespace.
+
+	If 'A' is shared, then 'B' is also shared and 'A' and 'B' propagate to
+	each other.
+
+	If 'A' is a slave mount of 'Z', then 'B' is also the slave mount of
+	'Z'.
+
+	If 'A' is a private mount, then 'B' is a private mount too.
+
+	If 'A' is an unbindable mount, then 'B' is an unbindable mount too.
+
+
+6) Quiz
+
+	A. What is the result of the following command sequence?
+
+		mount --bind /mnt /mnt
+		mount --make-shared /mnt
+		mount --bind /mnt /tmp
+		mount --move /tmp /mnt/1
+
+		What should the contents of /mnt, /mnt/1 and /mnt/1/1 be?
+		Should they all be identical, or should only /mnt and /mnt/1
+		be identical?
+
+
+	B. What is the result of the following command sequence?
+
+		mount --make-rshared /
+		mkdir -p /v/1
+		mount --rbind / /v/1
+
+		What should the content of /v/1/v/1 be?
+
+
+	C. What is the result of the following command sequence?
+
+		mount --bind /mnt /mnt
+		mount --make-shared /mnt
+		mkdir -p /mnt/1/2/3 /mnt/1/test
+		mount --bind /mnt/1 /tmp
+		mount --make-slave /mnt
+		mount --make-shared /mnt
+		mount --bind /mnt/1/2 /tmp1
+		mount --make-slave /mnt
+
+		At this point we have the first mount at /tmp and
+		its root dentry is 1. Let's call this mount 'A'.
+		Then we have a second mount at /tmp1 with root
+		dentry 2. Let's call this mount 'B'.
+		Next we have a third mount at /mnt with root dentry
+		mnt. Let's call this mount 'C'.
+
+		'B' is the slave of 'A' and 'C' is a slave of 'B'
+		A -> B -> C
+
+		At this point, if we execute the following command:
+
+		mount --bind /bin /tmp/test
+
+		The mount is attempted on 'A'
+
+		Will the mount propagate to 'B' and 'C'?
+
+		What would the contents of
+		/mnt/1/test be?
+
+7) FAQ
+
+	Q1. Why is bind mount needed? How is it different from symbolic links?
+		Symbolic links can get stale if the destination mount gets
+		unmounted or moved. Bind mounts continue to exist even if the
+		other mount is unmounted or moved.
+
+	Q2. Why can't the shared subtree be implemented using exportfs?
+
+		exportfs is a heavyweight way of accomplishing part of what
+		shared subtrees can do. I cannot imagine a way to implement
+		the semantics of slave mounts using exportfs.
+
+	Q3. Why is an unbindable mount needed?
+
+		Let's say we want to replicate the mount tree at multiple
+		locations within the same subtree.
+
+		If one rbind mounts a tree within the same subtree 'n' times,
+		the number of mounts created grows explosively with 'n'.
+		Having an unbindable mount can help prune the unneeded bind
+		mounts. Here is an example.
+
+		step 1:
+		   Let's say the root tree has just two directories with
+		   one vfsmount.
+				    root
+				   /    \
+				  tmp    usr
+
+		    And we want to replicate the tree at multiple
+		    mountpoints under /root/tmp
+
+		step2:
+		      mount --make-shared /root
+
+		      mkdir -p /tmp/m1
+
+		      mount --rbind /root /tmp/m1
+
+		      the new tree now looks like this:
+
+				    root
+				   /    \
+				 tmp    usr
+				/
+			       m1
+			      /  \
+			     tmp  usr
+			     /
+			    m1
+
+			  it has two vfsmounts
+
+		step3:
+			    mkdir -p /tmp/m2
+			    mount --rbind /root /tmp/m2
+
+			the new tree now looks like this:
+
+				      root
+				     /    \
+				   tmp     usr
+				  /    \
+				m1       m2
+			       / \       /  \
+			     tmp  usr   tmp  usr
+			     / \          /
+			    m1  m2      m1
+				/ \     /  \
+			      tmp usr  tmp   usr
+			      /        / \
+			     m1       m1  m2
+			    /  \
+			  tmp   usr
+			  /  \
+			 m1   m2
+
+		       it has 6 vfsmounts
+
+		step 4:
+			  mkdir -p /tmp/m3
+			  mount --rbind /root /tmp/m3
+
+			  I won't draw the tree, but it has 24 vfsmounts
+
+
+		At step i the number of vfsmounts is V[i] = i*V[i-1], so the
+		count grows factorially (even faster than exponentially). And
+		this tree has way more mounts than what we really needed in
+		the first place.
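+
+		The growth is easy to reproduce with a few lines of plain C
+		(nothing mount-specific, just the recurrence above):
+
+		#include <stdio.h>
+
+		int main(void)
+		{
+			unsigned long v = 1;	/* step 1: one vfsmount */
+			int i;
+
+			for (i = 2; i <= 6; i++) {
+				v = i * v;	/* V[i] = i * V[i-1] */
+				printf("step %d: %lu vfsmounts\n", i, v);
+			}
+			return 0;
+		}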
+
+		One could use a series of umounts at each step to prune
+		out the unneeded mounts. But there is a better solution:
+		unbindable mounts come in handy here.
+
+		step 1:
+		   Let's say the root tree has just two directories with
+		   one vfsmount.
+				    root
+				   /    \
+				  tmp    usr
+
+		    How do we set up the same tree at multiple locations under
+		    /root/tmp?
+
+		step2:
+		      mount --bind /root/tmp /root/tmp
+
+		      mount --make-rshared /root
+		      mount --make-unbindable /root/tmp
+
+		      mkdir -p /tmp/m1
+
+		      mount --rbind /root /tmp/m1
+
+		      the new tree now looks like this:
+
+				    root
+				   /    \
+				 tmp    usr
+				/
+			       m1
+			      /  \
+			     tmp  usr
+
+		step3:
+			    mkdir -p /tmp/m2
+			    mount --rbind /root /tmp/m2
+
+		      the new tree now looks like this:
+
+				    root
+				   /    \
+				 tmp    usr
+				/   \
+			       m1     m2
+			      /  \     / \
+			     tmp  usr tmp usr
+
+		step4:
+
+			    mkdir -p /tmp/m3
+			    mount --rbind /root /tmp/m3
+
+		      the new tree now looks like this:
+
+				    root
+				   /    \
+				 tmp    usr
+				/  |  \
+			      m1   m2   m3
+			     / \   / \   / \
+			   tmp usr tmp usr tmp usr
+
+8) Implementation
+
+8A) Datastructure
+
+	Four new fields are introduced to struct vfsmount:
+	->mnt_share
+	->mnt_slave_list
+	->mnt_slave
+	->mnt_master
+
+	->mnt_share links together all the mounts to/from which this vfsmount
+		sends/receives propagation events.
+
+	->mnt_slave_list links all the mounts to which this vfsmount
+		propagates.
+
+	->mnt_slave links together all the slaves that its master vfsmount
+		propagates to.
+
+	->mnt_master points to the master vfsmount from which this vfsmount
+		receives propagation.
+
+	->mnt_flags takes two more flags to indicate the propagation status of
+		the vfsmount.  MNT_SHARE indicates that the vfsmount is a shared
+		vfsmount.  MNT_UNCLONABLE indicates that the vfsmount cannot be
+		replicated.
+
+	All the shared vfsmounts in a peer group form a cyclic list through
+	->mnt_share.
+
+	All vfsmounts with the same ->mnt_master form a cyclic list anchored
+	in ->mnt_master->mnt_slave_list and going through ->mnt_slave.
+
+	 ->mnt_master can point to arbitrary (and possibly different) members
+	 of master peer group.  To find all immediate slaves of a peer group
+	 you need to go through _all_ ->mnt_slave_list of its members.
+	 Conceptually it's just a single set - distribution among the
+	 individual lists does not affect propagation or the way propagation
+	 tree is modified by operations.
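+
+	As an illustrative sketch (surrounding fields and their exact order
+	are omitted; see the real struct vfsmount definition in the source),
+	the new fields are three list heads plus a back pointer:
+
+	struct vfsmount {
+		/* ... existing fields ... */
+		struct list_head mnt_share;	/* peer group circular list */
+		struct list_head mnt_slave_list;/* list of our slave mounts */
+		struct list_head mnt_slave;	/* entry on master's mnt_slave_list */
+		struct vfsmount *mnt_master;	/* mount we receive propagation from */
+		/* ... */
+	};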
+
+	An example propagation tree is shown in the figure below.
+	[ NOTE: Though it looks like a forest, if we consider all the shared
+	mounts as a conceptual entity called a 'pnode', it becomes a tree. ]
+
+
+		        A <--> B <--> C <---> D
+		       /|\	      /|      |\
+		      / F G	     J K      H I
+		     /
+		    E<-->K
+			/|\
+		       M L N
+
+	In the above figure A, B, C and D are all shared and propagate to each
+	other.  'A' has got 3 slave mounts 'E', 'F' and 'G'.  'C' has got 2
+	slave mounts 'J' and 'K', and 'D' has got two slave mounts 'H' and 'I'.
+	'E' is also shared with 'K' and they propagate to each other.  And
+	'K' has 3 slaves 'M', 'L' and 'N'.
+
+	A's ->mnt_share links with the ->mnt_share of 'B' 'C' and 'D'
+
+	A's ->mnt_slave_list links with ->mnt_slave of 'E', 'K', 'F' and 'G'
+
+	E's ->mnt_share links with ->mnt_share of K
+	'E', 'K', 'F', 'G' have their ->mnt_master point to struct
+				vfsmount of 'A'
+	'M', 'L', 'N' have their ->mnt_master point to struct vfsmount of 'K'
+	K's ->mnt_slave_list links with ->mnt_slave of 'M', 'L' and 'N'
+
+	C's ->mnt_slave_list links with ->mnt_slave of 'J' and 'K'
+	J and K's ->mnt_master points to struct vfsmount of C
+	and finally D's ->mnt_slave_list links with ->mnt_slave of 'H' and 'I'
+	'H' and 'I' have their ->mnt_master pointing to struct vfsmount of 'D'.
+
+
+	NOTE: The propagation tree is orthogonal to the mount tree.
+
+
+8B) Algorithm:
+
+	The crux of the implementation resides in the rbind/move operations.
+
+	The overall algorithm breaks the operation into 3 phases (look at
+	attach_recursive_mnt() and propagate_mnt()):
+
+	1. prepare phase.
+	2. commit phase.
+	3. abort phase.
+
+	Prepare phase:
+
+	for each mount in the source tree:
+		   a) Create the necessary number of mount trees to
+		   	be attached to each of the mounts that receive
+			propagation from the destination mount.
+		   b) Do not attach any of the trees to its destination.
+		      However note down its ->mnt_parent and ->mnt_mountpoint
+		   c) Link all the new mounts to form a propagation tree that
+		      is identical to the propagation tree of the destination
+		      mount.
+
+		   If this phase is successful, there should be 'n' new
+		   propagation trees, where 'n' is the number of mounts in the
+		   source tree.  Go to the commit phase.
+
+		   Also there should be 'm' new mount trees, where 'm' is
+		   the number of mounts to which the destination mount
+		   propagates.
+
+		   If any memory allocation fails, go to the abort phase.
+
+	Commit phase
+		Attach each of the mount trees to its corresponding
+		destination mount.
+
+	Abort phase
+		Delete all the newly created trees.
+
+	NOTE: all the propagation-related functionality resides in the file
+	pnode.c.
+
+
+------------------------------------------------------------------------
+
+version 0.1  (created the initial document, Ram Pai linuxram@us.ibm.com)
+version 0.2  (Incorporated comments from Al Viro)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 296bc03..91d5ef3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -324,7 +324,7 @@
 
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && BROKEN #&& n
+	depends on EXPERIMENTAL && REALVIEW_MPCORE
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -585,7 +585,7 @@
 
 config FPE_NWFPE_XP
 	bool "Support extended precision"
-	depends on FPE_NWFPE && !CPU_BIG_ENDIAN
+	depends on FPE_NWFPE
 	help
 	  Say Y to include 80-bit support in the kernel floating-point
 	  emulator.  Otherwise, only 32 and 64-bit support is compiled in.
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index df14096..6851aba 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -84,63 +84,54 @@
 		.virtual	= IXP2000_CAP_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_CAP_PHYS_BASE),
 		.length		= IXP2000_CAP_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_INTCTL_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_INTCTL_PHYS_BASE),
 		.length		= IXP2000_INTCTL_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_PCI_CREG_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_PCI_CREG_PHYS_BASE),
 		.length		= IXP2000_PCI_CREG_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_PCI_CSR_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_PCI_CSR_PHYS_BASE),
 		.length		= IXP2000_PCI_CSR_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_MSF_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_MSF_PHYS_BASE),
 		.length		= IXP2000_MSF_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_PCI_IO_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_PCI_IO_PHYS_BASE),
 		.length		= IXP2000_PCI_IO_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_PCI_CFG0_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_PCI_CFG0_PHYS_BASE),
 		.length		= IXP2000_PCI_CFG0_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}, {
 		.virtual	= IXP2000_PCI_CFG1_VIRT_BASE,
 		.pfn		= __phys_to_pfn(IXP2000_PCI_CFG1_PHYS_BASE),
 		.length		= IXP2000_PCI_CFG1_SIZE,
-		.type		= MT_DEVICE
+		.type		= MT_IXP2000_DEVICE,
 	}
 };
 
 void __init ixp2000_map_io(void)
 {
-	extern unsigned int processor_id;
-
 	/*
-	 * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE for
-	 * tweaking the PMDs so XCB=101. On IXP2800s we use the normal
-	 * PMD flags.
+	 * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE so that
+	 * XCB=101 (to avoid triggering erratum #66), and given that
+	 * this mode speeds up I/O accesses and we have write buffer
+	 * flushes in the right places anyway, it doesn't hurt to use
+	 * XCB=101 for all IXP2000s.
 	 */
-	if ((processor_id & 0xfffffff0) == 0x69054190) {
-		int i;
-
-		printk(KERN_INFO "Enabling IXP2400 erratum #66 workaround\n");
-
-		for(i=0;i<ARRAY_SIZE(ixp2000_io_desc);i++)
-			ixp2000_io_desc[i].type = MT_IXP2000_DEVICE;
-	}
-
 	iotable_init(ixp2000_io_desc, ARRAY_SIZE(ixp2000_io_desc));
 
 	/* Set slowport to 8-bit mode.  */
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 4b63dc9..1299768 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -8,4 +8,13 @@
 	help
 	  Include support for the ARM(R) RealView Emulation Baseboard platform.
 
+config REALVIEW_MPCORE
+	bool "Support MPcore tile"
+	depends on MACH_REALVIEW_EB
+	help
+	  Enable support for the MPCore tile on the Realview platform.
+	  Since there are device address and interrupt differences, a
+	  kernel built with this option enabled is not compatible with
+	  other tiles.
+
 endmenu
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index 8d37ea1..011a85c 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -4,3 +4,4 @@
 
 obj-y					:= core.o clock.o
 obj-$(CONFIG_MACH_REALVIEW_EB)		+= realview_eb.o
+obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 575599d..d83e8ba 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -23,6 +23,7 @@
 #define __ASM_ARCH_REALVIEW_H
 
 #include <asm/hardware/amba.h>
+#include <asm/leds.h>
 #include <asm/io.h>
 
 #define __io_address(n)		__io(IO_ADDRESS(n))
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S
new file mode 100644
index 0000000..4075473
--- /dev/null
+++ b/arch/arm/mach-realview/headsmp.S
@@ -0,0 +1,39 @@
+/*
+ *  linux/arch/arm/mach-realview/headsmp.S
+ *
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * Realview specific entry point for secondary CPUs.  This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(realview_secondary_startup)
+	mrc	p15, 0, r0, c0, c0, 5
+	and	r0, r0, #15
+	adr	r4, 1f
+	ldmia	r4, {r5, r6}
+	sub	r4, r4, r5
+	add	r6, r6, r4
+pen:	ldr	r7, [r6]
+	cmp	r7, r0
+	bne	pen
+
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+1:	.long	.
+	.long	pen_release
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
new file mode 100644
index 0000000..9844644
--- /dev/null
+++ b/arch/arm/mach-realview/platsmp.c
@@ -0,0 +1,195 @@
+/*
+ *  linux/arch/arm/mach-realview/platsmp.c
+ *
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/arm_scu.h>
+#include <asm/hardware.h>
+
+#include "core.h"
+
+extern void realview_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+static unsigned int __init get_core_count(void)
+{
+	unsigned int ncores;
+
+	ncores = __raw_readl(IO_ADDRESS(REALVIEW_MPCORE_SCU_BASE) + SCU_CONFIG);
+
+	return (ncores & 0x03) + 1;
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	/*
+	 * the primary core may have used a "cross call" soft interrupt
+	 * to get this processor out of WFI in the BootMonitor - make
+	 * sure that we are no longer being sent this soft interrupt
+	 */
+	smp_cross_call_done(cpumask_of_cpu(cpu));
+
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+	gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE));
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	pen_release = -1;
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	pen_release = cpu;
+	flush_cache_all();
+
+	/*
+	 * XXX
+	 *
+	 * This is a later addition to the booting protocol: the
+	 * bootMonitor now puts secondary cores into WFI, so
+	 * poke_milo() no longer gets the cores moving; we need
+	 * to send a soft interrupt to wake the secondary core.
+	 * Use smp_cross_call() for this, since there's little
+	 * point duplicating the code here
+	 */
+	smp_cross_call(cpumask_of_cpu(cpu));
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+static void __init poke_milo(void)
+{
+	extern void secondary_startup(void);
+
+	/* nobody is to be released from the pen yet */
+	pen_release = -1;
+
+	/*
+	 * write the address of secondary startup into the system-wide
+	 * flags register, then clear the bottom two bits, which is what
+	 * BootMonitor is waiting for
+	 */
+#if 1
+#define REALVIEW_SYS_FLAGSS_OFFSET 0x30
+	__raw_writel(virt_to_phys(realview_secondary_startup),
+		     (IO_ADDRESS(REALVIEW_SYS_BASE) +
+		      REALVIEW_SYS_FLAGSS_OFFSET));
+#define REALVIEW_SYS_FLAGSC_OFFSET 0x34
+	__raw_writel(3,
+		     (IO_ADDRESS(REALVIEW_SYS_BASE) +
+		      REALVIEW_SYS_FLAGSC_OFFSET));
+#endif
+
+	mb();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int ncores = get_core_count();
+	unsigned int cpu = smp_processor_id();
+	int i;
+
+	/* sanity check */
+	if (ncores == 0) {
+		printk(KERN_ERR
+		       "Realview: strange CM count of 0? Default to 1\n");
+
+		ncores = 1;
+	}
+
+	if (ncores > NR_CPUS) {
+		printk(KERN_WARNING
+		       "Realview: no. of cores (%d) greater than configured "
+		       "maximum of %d - clipping\n",
+		       ncores, NR_CPUS);
+		ncores = NR_CPUS;
+	}
+
+	smp_store_cpu_info(cpu);
+
+	/*
+	 * are we trying to boot more cores than exist?
+	 */
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	/*
+	 * Initialise the possible/present maps.
+	 * cpu_possible_map describes the set of CPUs which may be present
+	 * cpu_present_map describes the set of CPUs populated
+	 */
+	for (i = 0; i < max_cpus; i++) {
+		cpu_set(i, cpu_possible_map);
+		cpu_set(i, cpu_present_map);
+	}
+
+	/*
+	 * Do we need any more CPUs? If so, then let them know where
+	 * to start. Note that, on modern versions of MILO, the "poke"
+	 * doesn't actually do anything until each individual core is
+	 * sent a soft interrupt to get it out of WFI
+	 */
+	if (max_cpus > 1)
+		poke_milo();
+}
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 267bb07..7dc3250 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -136,6 +136,11 @@
 
 static void __init gic_init_irq(void)
 {
+#ifdef CONFIG_REALVIEW_MPCORE
+	writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
+	writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
+	writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
+#endif
 	gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
 	gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE));
 }
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index fb5b402..9e50127 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -354,7 +354,7 @@
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot;
+	unsigned int user_pgprot, kern_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -381,7 +381,7 @@
 	}
 
 	cp = &cache_policies[cachepolicy];
-	user_pgprot = cp->pte;
+	kern_pgprot = user_pgprot = cp->pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -393,6 +393,7 @@
 		 */
 		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
 		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -412,32 +413,47 @@
 		 * (iow, non-global)
 		 */
 		user_pgprot |= L_PTE_ASID;
+
+#ifdef CONFIG_SMP
+		/*
+		 * Mark memory with the "shared" attribute for SMP systems
+		 */
+		user_pgprot |= L_PTE_SHARED;
+		kern_pgprot |= L_PTE_SHARED;
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+#endif
 	}
 
+	for (i = 0; i < 16; i++) {
+		unsigned long v = pgprot_val(protection_map[i]);
+		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
+		protection_map[i] = __pgprot(v);
+	}
+
+	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+
 	if (cpu_arch >= CPU_ARCH_ARMv5) {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+#ifndef CONFIG_SMP
+		/*
+		 * Only use write-through for non-SMP systems
+		 */
+		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+#endif
 	} else {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
 	}
 
+	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE |
+				 L_PTE_EXEC | kern_pgprot);
+
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
-	}
-
-	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE |
-				 L_PTE_EXEC | cp->pte);
-
 	switch (cp->pmd) {
 	case PMD_SECT_WT:
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 9bb5fff..92f3ca3 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/hardware/arm_scu.h>
 #include <asm/procinfo.h>
 #include <asm/pgtable.h>
 
@@ -112,6 +113,9 @@
 ENTRY(cpu_v6_switch_mm)
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+#ifdef CONFIG_SMP
+	orr	r0, r0, #2			@ set shared pgtable
+#endif
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -140,7 +144,7 @@
 ENTRY(cpu_v6_set_pte)
 	str	r1, [r0], #-2048		@ linux version
 
-	bic	r2, r1, #0x000007f0
+	bic	r2, r1, #0x000003f0
 	bic	r2, r2, #0x00000003
 	orr	r2, r2, #PTE_EXT_AP0 | 2
 
@@ -191,6 +195,23 @@
  *	- cache type register is implemented
  */
 __v6_setup:
+#ifdef CONFIG_SMP
+	/* Set up the SCU on core 0 only */
+	mrc	p15, 0, r0, c0, c0, 5		@ CPU core number
+	ands	r0, r0, #15
+	moveq	r0, #0x10000000 @ SCU_BASE
+	orreq	r0, r0, #0x00100000
+	ldreq	r5, [r0, #SCU_CTRL]
+	orreq	r5, r5, #1
+	streq	r5, [r0, #SCU_CTRL]
+
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	orr	r0, r0, #0x20
+	mcr	p15, 0, r0, c1, c0, 1
+#endif
+#endif
+
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c14, 0		@ clean+invalidate D cache
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
@@ -198,6 +219,9 @@
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
+#ifdef CONFIG_SMP
+	orr	r4, r4, #2			@ set shared pgtable
+#endif
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #ifdef CONFIG_VFP
 	mrc	p15, 0, r0, c1, c0, 2
diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h
index 9677ae8..da4c616 100644
--- a/arch/arm/nwfpe/fpa11.h
+++ b/arch/arm/nwfpe/fpa11.h
@@ -60,7 +60,7 @@
 #ifdef CONFIG_FPE_NWFPE_XP
 	floatx80 fExtended;
 #else
-	int padding[3];
+	u32 padding[3];
 #endif
 } FPREG;
 
diff --git a/arch/arm/nwfpe/fpa11_cpdt.c b/arch/arm/nwfpe/fpa11_cpdt.c
index b0db5cb..32859fa 100644
--- a/arch/arm/nwfpe/fpa11_cpdt.c
+++ b/arch/arm/nwfpe/fpa11_cpdt.c
@@ -59,8 +59,13 @@
 	p = (unsigned int *) &fpa11->fpreg[Fn].fExtended;
 	fpa11->fType[Fn] = typeExtended;
 	get_user(p[0], &pMem[0]);	/* sign & exponent */
+#ifdef __ARMEB__
+	get_user(p[1], &pMem[1]);	/* ms bits */
+	get_user(p[2], &pMem[2]);	/* ls bits */
+#else
 	get_user(p[1], &pMem[2]);	/* ls bits */
 	get_user(p[2], &pMem[1]);	/* ms bits */
+#endif
 }
 #endif
 
@@ -177,8 +182,13 @@
 	}
 
 	put_user(val.i[0], &pMem[0]);	/* sign & exp */
+#ifdef __ARMEB__
+	put_user(val.i[1], &pMem[1]);	/* msw */
+	put_user(val.i[2], &pMem[2]);
+#else
 	put_user(val.i[1], &pMem[2]);
 	put_user(val.i[2], &pMem[1]);	/* msw */
+#endif
 }
 #endif
 
diff --git a/arch/arm/nwfpe/fpopcode.c b/arch/arm/nwfpe/fpopcode.c
index 4c9f570..67ff2ab 100644
--- a/arch/arm/nwfpe/fpopcode.c
+++ b/arch/arm/nwfpe/fpopcode.c
@@ -29,14 +29,14 @@
 
 #ifdef CONFIG_FPE_NWFPE_XP
 const floatx80 floatx80Constant[] = {
-	{0x0000, 0x0000000000000000ULL},	/* extended 0.0 */
-	{0x3fff, 0x8000000000000000ULL},	/* extended 1.0 */
-	{0x4000, 0x8000000000000000ULL},	/* extended 2.0 */
-	{0x4000, 0xc000000000000000ULL},	/* extended 3.0 */
-	{0x4001, 0x8000000000000000ULL},	/* extended 4.0 */
-	{0x4001, 0xa000000000000000ULL},	/* extended 5.0 */
-	{0x3ffe, 0x8000000000000000ULL},	/* extended 0.5 */
-	{0x4002, 0xa000000000000000ULL}		/* extended 10.0 */
+	{ .high = 0x0000, .low = 0x0000000000000000ULL},/* extended 0.0 */
+	{ .high = 0x3fff, .low = 0x8000000000000000ULL},/* extended 1.0 */
+	{ .high = 0x4000, .low = 0x8000000000000000ULL},/* extended 2.0 */
+	{ .high = 0x4000, .low = 0xc000000000000000ULL},/* extended 3.0 */
+	{ .high = 0x4001, .low = 0x8000000000000000ULL},/* extended 4.0 */
+	{ .high = 0x4001, .low = 0xa000000000000000ULL},/* extended 5.0 */
+	{ .high = 0x3ffe, .low = 0x8000000000000000ULL},/* extended 0.5 */
+	{ .high = 0x4002, .low = 0xa000000000000000ULL},/* extended 10.0 */
 };
 #endif
 
diff --git a/arch/arm/nwfpe/softfloat-specialize b/arch/arm/nwfpe/softfloat-specialize
index acf4091..d4a4c8e 100644
--- a/arch/arm/nwfpe/softfloat-specialize
+++ b/arch/arm/nwfpe/softfloat-specialize
@@ -332,6 +332,7 @@
 
     z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 );
     z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF;
+    z.__padding = 0;
     return z;
 
 }
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c
index f9f0491..0f9656e 100644
--- a/arch/arm/nwfpe/softfloat.c
+++ b/arch/arm/nwfpe/softfloat.c
@@ -531,6 +531,7 @@
 
     z.low = zSig;
     z.high = ( ( (bits16) zSign )<<15 ) + zExp;
+    z.__padding = 0;
     return z;
 
 }
@@ -2831,6 +2832,7 @@
         roundData->exception |= float_flag_invalid;
         z.low = floatx80_default_nan_low;
         z.high = floatx80_default_nan_high;
+        z.__padding = 0;
         return z;
     }
     if ( aExp == 0 ) {
@@ -2950,6 +2952,7 @@
             roundData->exception |= float_flag_invalid;
             z.low = floatx80_default_nan_low;
             z.high = floatx80_default_nan_high;
+            z.__padding = 0;
             return z;
         }
         return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
@@ -3015,6 +3018,7 @@
                 roundData->exception |= float_flag_invalid;
                 z.low = floatx80_default_nan_low;
                 z.high = floatx80_default_nan_high;
+                z.__padding = 0;
                 return z;
             }
             roundData->exception |= float_flag_divbyzero;
@@ -3093,6 +3097,7 @@
             roundData->exception |= float_flag_invalid;
             z.low = floatx80_default_nan_low;
             z.high = floatx80_default_nan_high;
+            z.__padding = 0;
             return z;
         }
         normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
@@ -3184,6 +3189,7 @@
         roundData->exception |= float_flag_invalid;
         z.low = floatx80_default_nan_low;
         z.high = floatx80_default_nan_high;
+        z.__padding = 0;
         return z;
     }
     if ( aExp == 0 ) {
diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h
index 1415170..978c699 100644
--- a/arch/arm/nwfpe/softfloat.h
+++ b/arch/arm/nwfpe/softfloat.h
@@ -51,11 +51,17 @@
 Software IEC/IEEE floating-point types.
 -------------------------------------------------------------------------------
 */
-typedef unsigned long int float32;
-typedef unsigned long long float64;
+typedef u32 float32;
+typedef u64 float64;
 typedef struct {
-    unsigned short high;
-    unsigned long long low;
+#ifdef __ARMEB__
+    u16 __padding;
+    u16 high;
+#else
+    u16 high;
+    u16 __padding;
+#endif
+    u64 low;
 } floatx80;
 
 /*
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index caa9f77..871366b 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -377,10 +377,9 @@
         arg0.buffer.length = 12;
         arg0.buffer.pointer = (u8 *) arg0_buf;
 
-	data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
 		return (-ENOMEM);
-	memset(data, 0, sizeof(struct cpufreq_acpi_io));
 
 	acpi_io_data[cpu] = data;
 
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index 73a5dc5..edcd626 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -171,10 +171,9 @@
 	unsigned int speed;
 	u8 fid, vid;
 
-	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
+	powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
 	if (!powernow_table)
 		return -ENOMEM;
-	memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1)));
 
 	for (j=0 ; j < number_scales; j++) {
 		fid = *pst++;
@@ -305,16 +304,13 @@
 		goto err0;
 	}
 
-	acpi_processor_perf = kmalloc(sizeof(struct acpi_processor_performance),
+	acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
 				      GFP_KERNEL);
-
 	if (!acpi_processor_perf) {
 		retval = -ENOMEM;
 		goto err0;
 	}
 
-	memset(acpi_processor_perf, 0, sizeof(struct acpi_processor_performance));
-
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -337,14 +333,12 @@
 		goto err2;
 	}
 
-	powernow_table = kmalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
+	powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
 	if (!powernow_table) {
 		retval = -ENOMEM;
 		goto err2;
 	}
 
-	memset(powernow_table, 0, ((number_scales + 1) * sizeof(struct cpufreq_frequency_table)));
-
 	pc.val = (unsigned long) acpi_processor_perf->states[0].control;
 	for (i = 0; i < number_scales; i++) {
 		u8 fid, vid;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 2d5c9ad..68a1fc8 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -462,7 +462,6 @@
 
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	schedule();
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
@@ -497,9 +496,7 @@
 
 out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	return rc;
-
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
@@ -913,7 +910,6 @@
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -968,8 +964,6 @@
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
-
 	return ret;
 }
 
@@ -991,12 +985,11 @@
 	if (!check_supported_cpu(pol->cpu))
 		return -ENODEV;
 
-	data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
+	data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
 	if (!data) {
 		printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
 		return -ENOMEM;
 	}
-	memset(data,0,sizeof(struct powernow_k8_data));
 
 	data->cpu = pol->cpu;
 
@@ -1026,7 +1019,6 @@
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -1045,7 +1037,6 @@
 
 	/* run on any CPU again */
 	set_cpus_allowed(current, oldmask);
-	schedule();
 
 	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	pol->cpus = cpu_core_map[pol->cpu];
@@ -1080,7 +1071,6 @@
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	powernow_k8_cpu_exit_acpi(data);
 
 	kfree(data);
@@ -1116,17 +1106,14 @@
 		set_cpus_allowed(current, oldmask);
 		return 0;
 	}
-	preempt_disable();
-	
+
 	if (query_current_values_with_pending_wait(data))
 		goto out;
 
 	khz = find_khz_freq_from_fid(data->currfid);
 
- out:
-	preempt_enable_no_resched();
+out:
 	set_cpus_allowed(current, oldmask);
-
 	return khz;
 }
 
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 0ea010a..edb9873 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -423,12 +423,11 @@
 		}
 	}
 
-	centrino_model[cpu] = kmalloc(sizeof(struct cpu_model), GFP_KERNEL);
+	centrino_model[cpu] = kzalloc(sizeof(struct cpu_model), GFP_KERNEL);
 	if (!centrino_model[cpu]) {
 		result = -ENOMEM;
 		goto err_unreg;
 	}
-	memset(centrino_model[cpu], 0, sizeof(struct cpu_model));
 
 	centrino_model[cpu]->model_name=NULL;
 	centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9f2093c..d4de8a4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -191,6 +191,7 @@
 
 config IA64_SGI_SN_XP
 	tristate "Support communication between SGI SSIs"
+	depends on IA64_GENERIC || IA64_SGI_SN2
 	select IA64_UNCACHED_ALLOCATOR
 	help
 	  An SGI machine can be divided into multiple Single System
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6537445..3cfb8be 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -201,6 +201,14 @@
 	  Only choose N if you know in advance that you will not need to modify
 	  OpenPROM settings on the running system.
 
+config SPARC_LED
+	tristate "Sun4m LED driver"
+	help
+	  This driver toggles the front-panel LED on sun4m systems
+	  in a user-specifiable manner.  Its state can be probed
+	  by reading /proc/led and its blinking mode can be changed
+	  via writes to /proc/led.
+
 source "fs/Kconfig.binfmt"
 
 config SUNOS_EMUL
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 3d22ba2..1b83e21 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_PCI) += ebus.o
 obj-$(CONFIG_SUN_PM) += apc.o pmc.o
 obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
+obj-$(CONFIG_SPARC_LED) += led.o
 
 ifdef CONFIG_SUNOS_EMUL
 obj-y += sys_sunos.o sunos_ioctl.o
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
new file mode 100644
index 0000000..2a3afca
--- /dev/null
+++ b/arch/sparc/kernel/led.c
@@ -0,0 +1,139 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+
+#include <asm/auxio.h>
+
+#define LED_MAX_LENGTH 8 /* maximum chars written to proc file */
+
+static inline void led_toggle(void)
+{
+	unsigned char val = get_auxio();
+	unsigned char on, off;
+
+	if (val & AUXIO_LED) {
+		on = 0;
+		off = AUXIO_LED;
+	} else {
+		on = AUXIO_LED;
+		off = 0;
+	}
+
+	set_auxio(on, off);
+}
+
+static struct timer_list led_blink_timer;
+
+static void led_blink(unsigned long timeout)
+{
+	led_toggle();
+
+	/* reschedule */
+	if (!timeout) { /* blink according to load */
+		led_blink_timer.expires = jiffies +
+			((1 + (avenrun[0] >> FSHIFT)) * HZ);
+		led_blink_timer.data = 0;
+	} else { /* blink at user specified interval */
+		led_blink_timer.expires = jiffies + (timeout * HZ);
+		led_blink_timer.data = timeout;
+	}
+	add_timer(&led_blink_timer);
+}
+
+static int led_read_proc(char *buf, char **start, off_t offset, int count,
+			 int *eof, void *data)
+{
+	int len = 0;
+
+	if (get_auxio() & AUXIO_LED)
+		len = sprintf(buf, "on\n");
+	else
+		len = sprintf(buf, "off\n");
+
+	return len;
+}
+
+static int led_write_proc(struct file *file, const char *buffer,
+			  unsigned long count, void *data)
+{
+	char *buf = NULL;
+
+	if (count > LED_MAX_LENGTH)
+		count = LED_MAX_LENGTH;
+
+	buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	buf[count] = '\0';
+
+	/* work around \n when echo'ing into proc */
+	if (buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+
+	/* before we change anything we want to stop any running timers,
+	 * otherwise calls such as on will have no persistent effect
+	 */
+	del_timer_sync(&led_blink_timer);
+
+	if (!strcmp(buf, "on")) {
+		auxio_set_led(AUXIO_LED_ON);
+	} else if (!strcmp(buf, "toggle")) {
+		led_toggle();
+	} else if ((*buf > '0') && (*buf <= '9')) {
+		led_blink(simple_strtoul(buf, NULL, 10));
+	} else if (!strcmp(buf, "load")) {
+		led_blink(0);
+	} else {
+		auxio_set_led(AUXIO_LED_OFF);
+	}
+
+	kfree(buf);
+
+	return count;
+}
+
+static struct proc_dir_entry *led;
+
+#define LED_VERSION	"0.1"
+
+static int __init led_init(void)
+{
+	init_timer(&led_blink_timer);
+	led_blink_timer.function = led_blink;
+
+	led = create_proc_entry("led", 0, NULL);
+	if (!led)
+		return -ENOMEM;
+
+	led->read_proc = led_read_proc; /* reader function */
+	led->write_proc = led_write_proc; /* writer function */
+	led->owner = THIS_MODULE;
+
+	printk(KERN_INFO
+	       "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n",
+	       LED_VERSION);
+
+	return 0;
+}
+
+static void __exit led_exit(void)
+{
+	remove_proc_entry("led", NULL);
+	del_timer_sync(&led_blink_timer);
+}
+
+module_init(led_init);
+module_exit(led_exit);
+
+MODULE_AUTHOR("Lars Kotthoff <metalhead@metalhead.ws>");
+MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LED_VERSION);
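Editorial note, not part of the patch: led_write_proc() above accepts "on", "toggle", "load", a blink interval in seconds such as "2", or any other string to switch the LED off, and led_read_proc() reports the current state as "on" or "off". Assuming the driver is loaded and /proc/led exists, a minimal user-space sketch like the following (file name and structure are arbitrary) exercises that interface:

	/* ledctl.c -- illustrative only; drives the /proc/led interface above. */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		const char *cmd = (argc > 1) ? argv[1] : "toggle";
		char state[16];
		FILE *f;

		/* Write a command; the driver strips a trailing newline itself. */
		f = fopen("/proc/led", "w");
		if (!f) {
			perror("/proc/led");
			return EXIT_FAILURE;
		}
		fprintf(f, "%s\n", cmd);
		fclose(f);

		/* Read back the current state, which is either "on" or "off". */
		f = fopen("/proc/led", "r");
		if (!f) {
			perror("/proc/led");
			return EXIT_FAILURE;
		}
		if (fgets(state, sizeof(state), f))
			printf("led is now: %s", state);
		fclose(f);

		return EXIT_SUCCESS;
	}

Run as e.g. "./ledctl load" to switch the LED to load-average blinking and print the state read back from /proc/led.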
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
index df1c0b3..a6ba3d2 100644
--- a/arch/sparc/kernel/sunos_ioctl.c
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -23,7 +23,6 @@
 #include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <linux/file.h>
-#include <asm/kbio.h>
 
 #if 0
 extern char sunkbd_type;
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index e6a0032..92e2630 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -11,33 +11,14 @@
 
 #define INCLUDES
 #include "compat_ioctl.c"
-#include <linux/ncp_fs.h>
 #include <linux/syscalls.h>
 #include <asm/fbio.h>
-#include <asm/kbio.h>
-#include <asm/vuid_event.h>
-#include <asm/envctrl.h>
-#include <asm/display7seg.h>
-#include <asm/openpromio.h>
-#include <asm/audioio.h>
-#include <asm/watchdog.h>
 
 /* Use this to get at 32-bit user passed pointers. 
  * See sys_sparc32.c for description about it.
  */
 #define A(__x) compat_ptr(__x)
 
-static __inline__ void *alloc_user_space(long len)
-{
-	struct pt_regs *regs = current_thread_info()->kregs;
-	unsigned long usp = regs->u_regs[UREG_I6];
-
-	if (!(test_thread_flag(TIF_32BIT)))
-		usp += STACK_BIAS;
-
-	return (void *) (usp - len);
-}
-
 #define CODE
 #include "compat_ioctl.c"
 
@@ -111,357 +92,6 @@
 	return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
 }
 
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-/* This really belongs in include/linux/drm.h -DaveM */
-#include "../../../drivers/char/drm/drm.h"
-
-typedef struct drm32_version {
-	int    version_major;	  /* Major version			    */
-	int    version_minor;	  /* Minor version			    */
-	int    version_patchlevel;/* Patch level			    */
-	int    name_len;	  /* Length of name buffer		    */
-	u32    name;		  /* Name of driver			    */
-	int    date_len;	  /* Length of date buffer		    */
-	u32    date;		  /* User-space buffer to hold date	    */
-	int    desc_len;	  /* Length of desc buffer		    */
-	u32    desc;		  /* User-space buffer to hold desc	    */
-} drm32_version_t;
-#define DRM32_IOCTL_VERSION    DRM_IOWR(0x00, drm32_version_t)
-
-static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
-	drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int n;
-	int ret;
-
-	if (clear_user(p, 3 * sizeof(int)) ||
-	    get_user(n, &uversion->name_len) ||
-	    put_user(n, &p->name_len) ||
-	    get_user(addr, &uversion->name) ||
-	    put_user(compat_ptr(addr), &p->name) ||
-	    get_user(n, &uversion->date_len) ||
-	    put_user(n, &p->date_len) ||
-	    get_user(addr, &uversion->date) ||
-	    put_user(compat_ptr(addr), &p->date) ||
-	    get_user(n, &uversion->desc_len) ||
-	    put_user(n, &p->desc_len) ||
-	    get_user(addr, &uversion->desc) ||
-	    put_user(compat_ptr(addr), &p->desc))
-		return -EFAULT;
-
-        ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
-	if (ret)
-		return ret;
-
-	if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
-	    get_user(n, &p->name_len) ||
-	    put_user(n, &uversion->name_len) ||
-	    get_user(n, &p->date_len) ||
-	    put_user(n, &uversion->date_len) ||
-	    get_user(n, &p->desc_len) ||
-	    put_user(n, &uversion->desc_len))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm32_unique {
-	int	unique_len;	  /* Length of unique			    */
-	u32	unique;		  /* Unique name for driver instantiation   */
-} drm32_unique_t;
-#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
-#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
-
-static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
-	drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int n;
-	int ret;
-
-	if (get_user(n, &uarg->unique_len) ||
-	    put_user(n, &p->unique_len) ||
-	    get_user(addr, &uarg->unique) ||
-	    put_user(compat_ptr(addr), &p->unique))
-		return -EFAULT;
-
-	if (cmd == DRM32_IOCTL_GET_UNIQUE)
-		ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
-	else
-		ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
-
-	if (ret)
-		return ret;
-
-	if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm32_map {
-	u32		offset;	 /* Requested physical address (0 for SAREA)*/
-	u32		size;	 /* Requested physical size (bytes)	    */
-	drm_map_type_t	type;	 /* Type of memory to map		    */
-	drm_map_flags_t flags;	 /* Flags				    */
-	u32		handle;  /* User-space: "Handle" to pass to mmap    */
-				 /* Kernel-space: kernel-virtual address    */
-	int		mtrr;	 /* MTRR slot used			    */
-				 /* Private data			    */
-} drm32_map_t;
-#define DRM32_IOCTL_ADD_MAP    DRM_IOWR(0x15, drm32_map_t)
-
-static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
-	drm_map_t karg;
-	mm_segment_t old_fs;
-	u32 tmp;
-	int ret;
-
-	ret  = get_user(karg.offset, &uarg->offset);
-	ret |= get_user(karg.size, &uarg->size);
-	ret |= get_user(karg.type, &uarg->type);
-	ret |= get_user(karg.flags, &uarg->flags);
-	ret |= get_user(tmp, &uarg->handle);
-	ret |= get_user(karg.mtrr, &uarg->mtrr);
-	if (ret)
-		return -EFAULT;
-
-	karg.handle = (void *) (unsigned long) tmp;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		ret  = put_user(karg.offset, &uarg->offset);
-		ret |= put_user(karg.size, &uarg->size);
-		ret |= put_user(karg.type, &uarg->type);
-		ret |= put_user(karg.flags, &uarg->flags);
-		tmp = (u32) (long)karg.handle;
-		ret |= put_user(tmp, &uarg->handle);
-		ret |= put_user(karg.mtrr, &uarg->mtrr);
-		if (ret)
-			ret = -EFAULT;
-	}
-
-	return ret;
-}
-
-typedef struct drm32_buf_info {
-	int	       count;	/* Entries in list			     */
-	u32	       list;    /* (drm_buf_desc_t *) */ 
-} drm32_buf_info_t;
-#define DRM32_IOCTL_INFO_BUFS  DRM_IOWR(0x18, drm32_buf_info_t)
-
-static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
-	drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int n;
-	int ret;
-
-	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
-	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
-		return -EFAULT;
-
-	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
-	if (ret)
-		return ret;
-
-	if (get_user(n, &p->count) || put_user(n, &uarg->count))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm32_buf_free {
-	int	       count;
-	u32	       list;	/* (int *) */
-} drm32_buf_free_t;
-#define DRM32_IOCTL_FREE_BUFS  DRM_IOW( 0x1a, drm32_buf_free_t)
-
-static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
-	drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int n;
-
-	if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
-	    get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
-		return -EFAULT;
-
-	return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
-}
-
-typedef struct drm32_buf_pub {
-	int		  idx;	       /* Index into master buflist	     */
-	int		  total;       /* Buffer size			     */
-	int		  used;	       /* Amount of buffer in use (for DMA)  */
-	u32		  address;     /* Address of buffer (void *)	     */
-} drm32_buf_pub_t;
-
-typedef struct drm32_buf_map {
-	int	      count;	/* Length of buflist			    */
-	u32	      virtual;	/* Mmaped area in user-virtual (void *)	    */
-	u32 	      list;	/* Buffer information (drm_buf_pub_t *)	    */
-} drm32_buf_map_t;
-#define DRM32_IOCTL_MAP_BUFS   DRM_IOWR(0x19, drm32_buf_map_t)
-
-static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
-	drm32_buf_pub_t __user *ulist;
-	drm_buf_map_t __user *arg64;
-	drm_buf_pub_t __user *list;
-	int orig_count, ret, i;
-	int n;
-	compat_uptr_t addr;
-
-	if (get_user(orig_count, &uarg->count))
-		return -EFAULT;
-
-	arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
-				(size_t)orig_count * sizeof(drm_buf_pub_t));
-	list = (void __user *)(arg64 + 1);
-
-	if (put_user(orig_count, &arg64->count) ||
-	    put_user(list, &arg64->list) ||
-	    get_user(addr, &uarg->virtual) ||
-	    put_user(compat_ptr(addr), &arg64->virtual) ||
-	    get_user(addr, &uarg->list))
-		return -EFAULT;
-
-	ulist = compat_ptr(addr);
-
-	for (i = 0; i < orig_count; i++) {
-		if (get_user(n, &ulist[i].idx) ||
-		    put_user(n, &list[i].idx) ||
-		    get_user(n, &ulist[i].total) ||
-		    put_user(n, &list[i].total) ||
-		    get_user(n, &ulist[i].used) ||
-		    put_user(n, &list[i].used) ||
-		    get_user(addr, &ulist[i].address) ||
-		    put_user(compat_ptr(addr), &list[i].address))
-			return -EFAULT;
-	}
-
-	ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < orig_count; i++) {
-		void __user *p;
-		if (get_user(n, &list[i].idx) ||
-		    put_user(n, &ulist[i].idx) ||
-		    get_user(n, &list[i].total) ||
-		    put_user(n, &ulist[i].total) ||
-		    get_user(n, &list[i].used) ||
-		    put_user(n, &ulist[i].used) ||
-		    get_user(p, &list[i].address) ||
-		    put_user((unsigned long)p, &ulist[i].address))
-			return -EFAULT;
-	}
-
-	if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm32_dma {
-				/* Indices here refer to the offset into
-				   buflist in drm_buf_get_t.  */
-	int		context;	  /* Context handle		    */
-	int		send_count;	  /* Number of buffers to send	    */
-	u32		send_indices;	  /* List of handles to buffers (int *) */
-	u32		send_sizes;	  /* Lengths of data to send (int *) */
-	drm_dma_flags_t flags;		  /* Flags			    */
-	int		request_count;	  /* Number of buffers requested    */
-	int		request_size;	  /* Desired size for buffers	    */
-	u32		request_indices;  /* Buffer information (int *)	    */
-	u32		request_sizes;    /* (int *) */
-	int		granted_count;	  /* Number of buffers granted	    */
-} drm32_dma_t;
-#define DRM32_IOCTL_DMA	     DRM_IOWR(0x29, drm32_dma_t)
-
-/* RED PEN	The DRM layer blindly dereferences the send/request
- * 		index/size arrays even though they are userland
- * 		pointers.  -DaveM
- */
-static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
-	drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int ret;
-
-	if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
-	    get_user(addr, &uarg->send_indices) ||
-	    put_user(compat_ptr(addr), &p->send_indices) ||
-	    get_user(addr, &uarg->send_sizes) ||
-	    put_user(compat_ptr(addr), &p->send_sizes) ||
-	    copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
-	    copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
-	    copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
-	    get_user(addr, &uarg->request_indices) ||
-	    put_user(compat_ptr(addr), &p->request_indices) ||
-	    get_user(addr, &uarg->request_sizes) ||
-	    put_user(compat_ptr(addr), &p->request_sizes) ||
-	    copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
-		return -EFAULT;
-
-	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
-	if (ret)
-		return ret;
-
-	if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
-	    copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
-	    copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
-	    copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
-	    copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm32_ctx_res {
-	int		count;
-	u32		contexts; /* (drm_ctx_t *) */
-} drm32_ctx_res_t;
-#define DRM32_IOCTL_RES_CTX    DRM_IOWR(0x26, drm32_ctx_res_t)
-
-static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
-	drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
-	compat_uptr_t addr;
-	int ret;
-
-	if (copy_in_user(p, uarg, sizeof(int)) ||
-	    get_user(addr, &uarg->contexts) ||
-	    put_user(compat_ptr(addr), &p->contexts))
-		return -EFAULT;
-
-	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
-	if (ret)
-		return ret;
-
-	if (copy_in_user(uarg, p, sizeof(int)))
-		return -EFAULT;
-
-	return 0;
-}
-
-#endif
-
 typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
 
 #define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
@@ -485,103 +115,14 @@
 COMPATIBLE_IOCTL(FBIOGCURPOS)
 COMPATIBLE_IOCTL(FBIOGCURMAX)
 /* Little k */
-COMPATIBLE_IOCTL(KIOCTYPE)
-COMPATIBLE_IOCTL(KIOCLAYOUT)
-COMPATIBLE_IOCTL(KIOCGTRANS)
-COMPATIBLE_IOCTL(KIOCTRANS)
-COMPATIBLE_IOCTL(KIOCCMD)
-COMPATIBLE_IOCTL(KIOCSDIRECT)
-COMPATIBLE_IOCTL(KIOCSLED)
-COMPATIBLE_IOCTL(KIOCGLED)
-COMPATIBLE_IOCTL(KIOCSRATE)
-COMPATIBLE_IOCTL(KIOCGRATE)
-COMPATIBLE_IOCTL(VUIDSFORMAT)
-COMPATIBLE_IOCTL(VUIDGFORMAT)
 /* Little v, the video4linux ioctls */
 COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
 COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
-COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
-COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
-/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
-COMPATIBLE_IOCTL(D7SIOCWR)
-COMPATIBLE_IOCTL(D7SIOCTM)
-/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
- * embedded pointers in the arg which we'd need to clean up...
- */
-COMPATIBLE_IOCTL(OPROMGETOPT)
-COMPATIBLE_IOCTL(OPROMSETOPT)
-COMPATIBLE_IOCTL(OPROMNXTOPT)
-COMPATIBLE_IOCTL(OPROMSETOPT2)
-COMPATIBLE_IOCTL(OPROMNEXT)
-COMPATIBLE_IOCTL(OPROMCHILD)
-COMPATIBLE_IOCTL(OPROMGETPROP)
-COMPATIBLE_IOCTL(OPROMNXTPROP)
-COMPATIBLE_IOCTL(OPROMU2P)
-COMPATIBLE_IOCTL(OPROMGETCONS)
-COMPATIBLE_IOCTL(OPROMGETFBNAME)
-COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
-COMPATIBLE_IOCTL(OPROMSETCUR)
-COMPATIBLE_IOCTL(OPROMPCI2NODE)
-COMPATIBLE_IOCTL(OPROMPATH2NODE)
-/* Big L */
-COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
-COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
-/* Big A */
-COMPATIBLE_IOCTL(AUDIO_GETINFO)
-COMPATIBLE_IOCTL(AUDIO_SETINFO)
-COMPATIBLE_IOCTL(AUDIO_DRAIN)
-COMPATIBLE_IOCTL(AUDIO_GETDEV)
-COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
-COMPATIBLE_IOCTL(AUDIO_FLUSH)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
-COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
-COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
-COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
-COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
-COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
-COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
-#endif /* DRM */
-COMPATIBLE_IOCTL(WIOCSTART)
-COMPATIBLE_IOCTL(WIOCSTOP)
-COMPATIBLE_IOCTL(WIOCGSTAT)
 /* And these ioctls need translation */
 /* Note SIOCRTMSG is no longer, so this is safe and
  * the user would have seen just an -EINVAL anyways. */
 HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
 HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
 HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
-HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
-HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
-HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
-HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
-HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
-#endif /* DRM */
 #if 0
 HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
 HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index c1f3423..bf1849d 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -154,6 +154,7 @@
 			pud_t *pudp;
 			pmd_t *pmdp;
 			pte_t *ptep;
+			pte_t pte;
 
 			for_each_process(p) {
 				mm = p->mm;
@@ -178,8 +179,9 @@
 			 * being called from inside OBP.
 			 */
 			ptep = pte_offset_map(pmdp, va);
-			if (pte_present(*ptep)) {
-				tte = pte_val(*ptep);
+			pte = *ptep;
+			if (pte_present(pte)) {
+				tte = pte_val(pte);
 				res = PROM_TRUE;
 			}
 			pte_unmap(ptep);
@@ -218,6 +220,7 @@
 			pud_t *pudp;
 			pmd_t *pmdp;
 			pte_t *ptep;
+			pte_t pte;
 			int error;
 
 			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
@@ -240,8 +243,9 @@
 			 * being called from inside OBP.
 			 */
 			ptep = pte_offset_kernel(pmdp, va);
-			if (pte_present(*ptep)) {
-				tte = pte_val(*ptep);
+			pte = *ptep;
+			if (pte_present(pte)) {
+				tte = pte_val(pte);
 				res = PROM_TRUE;
 			}
 			goto done;
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index aecccd0..009a86e 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -863,6 +863,7 @@
 		pud_t *pudp = pud_offset(pgdp, address);
 		pmd_t *pmdp = pmd_offset(pudp, address);
 		pte_t *ptep;
+		pte_t pte;
 
 		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
 	
@@ -873,9 +874,10 @@
 
 		preempt_disable();
 		ptep = pte_offset_map(pmdp, address);
-		if (pte_present(*ptep)) {
+		pte = *ptep;
+		if (pte_present(pte)) {
 			unsigned long page = (unsigned long)
-				page_address(pte_page(*ptep));
+				page_address(pte_page(pte));
 
 			wmb();
 			__asm__ __volatile__("flush	%0 + %1"
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b137fd6..5d90ee9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -839,43 +839,29 @@
  *    questionable (in theory the big win for threads is the massive sharing of
  *    address space state across processors).
  */
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-        /*
-         * This code is called from two places, dup_mmap and exit_mmap. In the
-         * former case, we really need a flush. In the later case, the callers
-         * are single threaded exec_mmap (really need a flush), multithreaded
-         * exec_mmap case (do not need to flush, since the caller gets a new
-         * context via activate_mm), and all other callers of mmput() whence
-         * the flush can be optimized since the associated threads are dead and
-         * the mm is being torn down (__exit_mm and other mmput callers) or the
-         * owning thread is dissociating itself from the mm. The
-         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
-         * for single thread exec and dup_mmap cases. An alternate check might
-         * have been (current->mm != mm).
-         *                                              Kanoj Sarcar
-         */
-        if (atomic_read(&mm->mm_users) == 0)
-                return;
+	u32 ctx = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
 
-	{
-		u32 ctx = CTX_HWBITS(mm->context);
-		int cpu = get_cpu();
-
-		if (atomic_read(&mm->mm_users) == 1) {
-			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-			goto local_flush_and_out;
-		}
-
-		smp_cross_call_masked(&xcall_flush_tlb_mm,
-				      ctx, 0, 0,
-				      mm->cpu_vm_mask);
-
-	local_flush_and_out:
-		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
-
-		put_cpu();
+	if (atomic_read(&mm->mm_users) == 1) {
+		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		goto local_flush_and_out;
 	}
+
+	smp_cross_call_masked(&xcall_flush_tlb_mm,
+			      ctx, 0, 0,
+			      mm->cpu_vm_mask);
+
+local_flush_and_out:
+	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+	put_cpu();
 }
 
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
@@ -883,34 +869,13 @@
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-		goto local_flush_and_out;
-	} else {
-		/* This optimization is not valid.  Normally
-		 * we will be holding the page_table_lock, but
-		 * there is an exception which is copy_page_range()
-		 * when forking.  The lock is held during the individual
-		 * page table updates in the parent, but not at the
-		 * top level, which is where we are invoked.
-		 */
-		if (0) {
-			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_pending,
+				      ctx, nr, (unsigned long) vaddrs,
+				      mm->cpu_vm_mask);
 
-			/* By virtue of running under the mm->page_table_lock,
-			 * and mmu_context.h:switch_mm doing the same, the
-			 * following operation is safe.
-			 */
-			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
-				goto local_flush_and_out;
-		}
-	}
-
-	smp_cross_call_masked(&xcall_flush_tlb_pending,
-			      ctx, nr, (unsigned long) vaddrs,
-			      mm->cpu_vm_mask);
-
-local_flush_and_out:
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
index 7654b8a..3f619ea 100644
--- a/arch/sparc64/kernel/sunos_ioctl32.c
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -24,7 +24,6 @@
 #include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <linux/compat.h>
-#include <asm/kbio.h>
 
 #define SUNOS_NR_OPEN	256
 
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 38c5525..459c8fb 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -60,17 +60,6 @@
 
 static int set_rtc_mmss(unsigned long);
 
-static __init unsigned long dummy_get_tick(void)
-{
-	return 0;
-}
-
-static __initdata struct sparc64_tick_ops dummy_tick_ops = {
-	.get_tick	= dummy_get_tick,
-};
-
-struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops;
-
 #define TICK_PRIV_BIT	(1UL << 63)
 
 #ifdef CONFIG_SMP
@@ -200,6 +189,8 @@
 	.softint_mask	=	1UL << 0,
 };
 
+struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
+
 static void stick_init_tick(unsigned long offset)
 {
 	tick_disable_protection();
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index b02fc22..ba54b58 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -389,6 +389,7 @@
 MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
 
 static struct pci_driver agp_ali_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-ali",
 	.id_table	= agp_ali_pci_table,
 	.probe		= agp_ali_probe,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 3a41672..40fcd88b2 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -94,19 +94,16 @@
 	int retval = 0;
 	int i;
 
-	tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *),
-			 GFP_KERNEL);
+	tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL);
 	if (tables == NULL)
 		return -ENOMEM;
 
-	memset (tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1));
 	for (i = 0; i < nr_tables; i++) {
-		entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL);
+		entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
 		if (entry == NULL) {
 			retval = -ENOMEM;
 			break;
 		}
-		memset (entry, 0, sizeof(struct amd_page_map));
 		tables[i] = entry;
 		retval = amd_create_page_map(entry);
 		if (retval != 0)
@@ -518,6 +515,7 @@
 MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
 
 static struct pci_driver agp_amdk7_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-amdk7",
 	.id_table	= agp_amdk7_pci_table,
 	.probe		= agp_amdk7_probe,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 78ce98a..8f748fd 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -703,6 +703,7 @@
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
 static struct pci_driver agp_amd64_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-amd64",
 	.id_table	= agp_amd64_pci_table,
 	.probe		= agp_amd64_probe,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0b6e726..fbd4155 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -118,14 +118,12 @@
 	int retval = 0;
 	int i;
 
-	tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *),
-			 GFP_KERNEL);
+	tables = kzalloc((nr_tables + 1) * sizeof(ati_page_map *),GFP_KERNEL);
 	if (tables == NULL)
 		return -ENOMEM;
 
-	memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1));
 	for (i = 0; i < nr_tables; i++) {
-		entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL);
+		entry = kzalloc(sizeof(ati_page_map), GFP_KERNEL);
 		if (entry == NULL) {
 			while (i>0) {
 				kfree (tables[i-1]);
@@ -136,7 +134,6 @@
 			retval = -ENOMEM;
 			break;
 		}
-		memset(entry, 0, sizeof(ati_page_map));
 		tables[i] = entry;
 		retval = ati_create_page_map(entry);
 		if (retval != 0) break;
@@ -524,6 +521,7 @@
 MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
 
 static struct pci_driver agp_ati_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-ati",
 	.id_table	= agp_ati_pci_table,
 	.probe		= agp_ati_probe,
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 82b43c5..73f333f 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -222,12 +222,12 @@
 
 struct agp_bridge_data *agp_alloc_bridge(void)
 {
-	struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL);
-
+	struct agp_bridge_data *bridge;
+	
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
 	if (!bridge)
 		return NULL;
 
-	memset(bridge, 0, sizeof(*bridge));
 	atomic_set(&bridge->agp_in_use, 0);
 	atomic_set(&bridge->current_memory_agp, 0);
 
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index ac19fdc..d41e0a6 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -219,7 +219,7 @@
 
 		efficeon_private.l1_table[index] = page;
 
-		value = virt_to_gart(page) | pati | present | index;
+		value = virt_to_gart((unsigned long *)page) | pati | present | index;
 
 		pci_write_config_dword(agp_bridge->dev,
 			EFFICEON_ATTPAGE, value);
@@ -429,6 +429,7 @@
 MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
 
 static struct pci_driver agp_efficeon_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-efficeon",
 	.id_table	= agp_efficeon_pci_table,
 	.probe		= agp_efficeon_probe,
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3dfb664..17f520c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -189,13 +189,12 @@
 	struct agp_segment *user_seg;
 	size_t i;
 
-	seg = kmalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
+	seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
 	if (seg == NULL) {
 		kfree(region->seg_list);
 		region->seg_list = NULL;
 		return -ENOMEM;
 	}
-	memset(seg, 0, (sizeof(struct agp_segment_priv) * region->seg_count));
 	user_seg = region->seg_list;
 
 	for (i = 0; i < region->seg_count; i++) {
@@ -332,14 +331,11 @@
 {
 	struct agp_controller *controller;
 
-	controller = kmalloc(sizeof(struct agp_controller), GFP_KERNEL);
-
+	controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
 	if (controller == NULL)
 		return NULL;
 
-	memset(controller, 0, sizeof(struct agp_controller));
 	controller->pid = id;
-
 	return controller;
 }
 
@@ -540,12 +536,10 @@
 {
 	struct agp_client *new_client;
 
-	new_client = kmalloc(sizeof(struct agp_client), GFP_KERNEL);
-
+	new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
 	if (new_client == NULL)
 		return NULL;
 
-	memset(new_client, 0, sizeof(struct agp_client));
 	new_client->pid = id;
 	agp_insert_client(new_client);
 	return new_client;
@@ -709,11 +703,10 @@
 	if (minor != AGPGART_MINOR)
 		goto err_out;
 
-	priv = kmalloc(sizeof(struct agp_file_private), GFP_KERNEL);
+	priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
 	if (priv == NULL)
 		goto err_out_nomem;
 
-	memset(priv, 0, sizeof(struct agp_file_private));
 	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
 	priv->my_pid = current->pid;
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index ac9da0c..c4a3871 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -105,12 +105,10 @@
 {
 	struct agp_memory *new;
 
-	new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL);
-
+	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 	if (new == NULL)
 		return NULL;
 
-	memset(new, 0, sizeof(struct agp_memory));
 	new->key = agp_get_key();
 
 	if (new->key < 0) {
@@ -414,7 +412,8 @@
 	u32 tmp;
 
 	if (*requested_mode & AGP2_RESERVED_MASK) {
-		printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
 		*requested_mode &= ~AGP2_RESERVED_MASK;
 	}
 
@@ -492,7 +491,8 @@
 	u32 tmp;
 
 	if (*requested_mode & AGP3_RESERVED_MASK) {
-		printk(KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
+		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
+			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
 		*requested_mode &= ~AGP3_RESERVED_MASK;
 	}
 
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index a2d9e5e..58944cd 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -227,10 +227,9 @@
 	 */
 	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
 		size = current_size->num_entries * sizeof(i460.lp_desc[0]);
-		i460.lp_desc = kmalloc(size, GFP_KERNEL);
+		i460.lp_desc = kzalloc(size, GFP_KERNEL);
 		if (!i460.lp_desc)
 			return -ENOMEM;
-		memset(i460.lp_desc, 0, size);
 	}
 	return 0;
 }
@@ -366,13 +365,12 @@
 	}
 
 	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
-	lp->alloced_map = kmalloc(map_size, GFP_KERNEL);
+	lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
 	if (!lp->alloced_map) {
 		free_pages((unsigned long) lpage, order);
 		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
 		return -ENOMEM;
 	}
-	memset(lp->alloced_map, 0, map_size);
 
 	lp->paddr = virt_to_gart(lpage);
 	lp->refcount = 0;
@@ -619,6 +617,7 @@
 MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
 
 static struct pci_driver agp_intel_i460_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-intel-i460",
 	.id_table	= agp_intel_i460_pci_table,
 	.probe		= agp_intel_i460_probe,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 1f7d415..bf4cc9f 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1824,6 +1824,7 @@
 MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
 
 static struct pci_driver agp_intel_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-intel",
 	.id_table	= agp_intel_pci_table,
 	.probe		= agp_intel_probe,
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 80dafa3..3aed0c5 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -398,6 +398,7 @@
 MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
 
 static struct pci_driver agp_nvidia_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-nvidia",
 	.id_table	= agp_nvidia_pci_table,
 	.probe		= agp_nvidia_probe,
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index 7957fc9..4df7734 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -289,6 +289,8 @@
 	j = 0;
 	list_for_each_entry(info, &tioca_list, ca_list) {
 		struct list_head *tmp;
+		if (list_empty(info->ca_devices))
+			continue;
 		list_for_each(tmp, info->ca_devices) {
 			u8 cap_ptr;
 			pdev = pci_dev_b(tmp);
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index ebc0555..a701361 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -332,6 +332,7 @@
 MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
 
 static struct pci_driver agp_sis_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-sis",
 	.id_table	= agp_sis_pci_table,
 	.probe		= agp_sis_probe,
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 71ea59a..5a5392d 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -102,19 +102,17 @@
 	int retval = 0;
 	int i;
 
-	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), 
+	tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), 
 			 GFP_KERNEL);
-	if (tables == NULL) {
+	if (tables == NULL)
 		return -ENOMEM;
-	}
-	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
+
 	for (i = 0; i < nr_tables; i++) {
-		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
+		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
 		if (entry == NULL) {
 			retval = -ENOMEM;
 			break;
 		}
-		memset(entry, 0, sizeof(struct serverworks_page_map));
 		tables[i] = entry;
 		retval = serverworks_create_page_map(entry);
 		if (retval != 0) break;
@@ -244,13 +242,27 @@
  */
 static void serverworks_tlbflush(struct agp_memory *temp)
 {
+	unsigned long timeout;
+
 	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
-	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
+	timeout = jiffies + 3*HZ;
+	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
 		cpu_relax();
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
+			break;
+		}
+	}
 
 	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
-	while(readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
+	timeout = jiffies + 3*HZ;
+	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
 		cpu_relax();
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
+			break;
+		}
+	}
 }
 
 static int serverworks_configure(void)
@@ -533,6 +545,7 @@
 MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
 
 static struct pci_driver agp_serverworks_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-serverworks",
 	.id_table	= agp_serverworks_pci_table,
 	.probe		= agp_serverworks_probe,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index c825531..183c50a 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -658,6 +658,7 @@
 MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
 
 static struct pci_driver agp_uninorth_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-uninorth",
 	.id_table	= agp_uninorth_pci_table,
 	.probe		= agp_uninorth_probe,
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index c847df5..5d9a137 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -518,6 +518,7 @@
 
 
 static struct pci_driver agp_via_pci_driver = {
+	.owner		= THIS_MODULE,
 	.name		= "agpgart-via",
 	.id_table	= agp_via_pci_table,
 	.probe		= agp_via_probe,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6c6121b..25acf47 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -593,12 +593,11 @@
 		goto module_out;
 	}
 
-	policy = kmalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
 	if (!policy) {
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
-	memset(policy, 0, sizeof(struct cpufreq_policy));
 
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c1fc9c6..1774111 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -48,7 +48,10 @@
  * All times here are in uS.
  */
 static unsigned int 				def_sampling_rate;
-#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
+#define MIN_SAMPLING_RATE_RATIO			(2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE			(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE			(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
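Worked example (editorial note, not part of the patch): MIN_STAT_SAMPLING_RATE depends on HZ through jiffies_to_usecs(10) = 10 * (1000000 / HZ), and the hunk below clamps def_sampling_rate to that floor. The stand-alone sketch reproduces the arithmetic for an assumed HZ of 250 and a hypothetical 10 us transition latency:

	/* Sampling-rate floor sketch; HZ and the latency value are assumptions. */
	#include <stdio.h>

	#define HZ					250	/* example tick rate */
	#define MIN_SAMPLING_RATE_RATIO			2
	#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	1000

	static unsigned int jiffies_to_usecs(unsigned int j)
	{
		return j * (1000000 / HZ);
	}

	int main(void)
	{
		unsigned int latency = 10;	/* transition latency in us (10000 ns / 1000) */
		unsigned int floor = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
		unsigned int rate = latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;

		if (rate < floor)
			rate = floor;

		/* With HZ == 250: floor = 2 * 40000 = 80000 us, so rate is raised to 80000. */
		printf("floor %u us, def_sampling_rate %u us\n", floor, rate);
		return 0;
	}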
@@ -416,13 +419,16 @@
 		if (dbs_enable == 1) {
 			unsigned int latency;
 			/* policy latency is in nS. Convert it to uS first */
+			latency = policy->cpuinfo.transition_latency / 1000;
+			if (latency == 0)
+				latency = 1;
 
-			latency = policy->cpuinfo.transition_latency;
-			if (latency < 1000)
-				latency = 1000;
-
-			def_sampling_rate = (latency / 1000) *
+			def_sampling_rate = latency *
 					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+
+			if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 			dbs_tuners_ins.ignore_nice = 0;
 
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 3597f25..0bddb8e 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -193,11 +193,15 @@
 	unsigned int cpu = policy->cpu;
 	if (cpufreq_stats_table[cpu])
 		return -EBUSY;
-	if ((stat = kmalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
+	if ((stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL)) == NULL)
 		return -ENOMEM;
-	memset(stat, 0, sizeof (struct cpufreq_stats));
 
 	data = cpufreq_cpu_get(cpu);
+	if (data == NULL) {
+		ret = -EINVAL;
+		goto error_get_fail;
+	}
+
 	if ((ret = sysfs_create_group(&data->kobj, &stats_attr_group)))
 		goto error_out;
 
@@ -217,12 +221,11 @@
 	alloc_size += count * count * sizeof(int);
 #endif
 	stat->max_state = count;
-	stat->time_in_state = kmalloc(alloc_size, GFP_KERNEL);
+	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
 	if (!stat->time_in_state) {
 		ret = -ENOMEM;
 		goto error_out;
 	}
-	memset(stat->time_in_state, 0, alloc_size);
 	stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
@@ -245,6 +248,7 @@
 	return 0;
 error_out:
 	cpufreq_cpu_put(data);
+error_get_fail:
 	kfree(stat);
 	cpufreq_stats_table[cpu] = NULL;
 	return ret;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index ceae379..da52839 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1263,7 +1263,7 @@
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-	mmc_detect_change(host, 0);
+	mmc_rescan(host);
 
 	return 0;
 }
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index c82abeb..071ae24 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/timer.h>
+#include <linux/smp_lock.h>
 #include <asm/irq.h>
 #include <asm/ebus.h>
 #include <asm/oplib.h>
@@ -394,6 +395,28 @@
 	return(0);
 }
 
+static long wd_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	int rval = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	/* solaris ioctls are specific to this driver */
+	case WIOCSTART:
+	case WIOCSTOP:
+	case WIOCGSTAT:
+		lock_kernel();
+		rval = wd_ioctl(file->f_dentry->d_inode, file, cmd, arg);
+		unlock_kernel();
+		break;
+	/* everything else is handled by the generic compat layer */
+	default:
+		break;
+	}
+
+	return rval;
+}
+
 static ssize_t wd_write(struct file 	*file, 
 			const char	__user *buf, 
 			size_t 		count, 
@@ -441,6 +464,7 @@
 static struct file_operations wd_fops = {
 	.owner =	THIS_MODULE,
 	.ioctl =	wd_ioctl,
+	.compat_ioctl =	wd_compat_ioctl,
 	.open =		wd_open,
 	.write =	wd_write,
 	.read =		wd_read,
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 24ed589..39f5421 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/ioport.h>		/* request_region */
+#include <linux/smp_lock.h>
 #include <asm/atomic.h>
 #include <asm/ebus.h>			/* EBus device					*/
 #include <asm/oplib.h>			/* OpenProm Library 			*/
@@ -114,22 +115,25 @@
 	return 0;
 }
 
-static int d7s_ioctl(struct inode *inode, struct file *f, 
-		     unsigned int cmd, unsigned long arg)
+static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	__u8 regs = readb(d7s_regs);
 	__u8 ireg = 0;
+	int error = 0;
 
-	if (D7S_MINOR != iminor(inode))
+	if (D7S_MINOR != iminor(file->f_dentry->d_inode))
 		return -ENODEV;
 
+	lock_kernel();
 	switch (cmd) {
 	case D7SIOCWR:
 		/* assign device register values
 		 * we mask-out D7S_FLIP if in sol_compat mode
 		 */
-		if (get_user(ireg, (int __user *) arg))
-			return -EFAULT;
+		if (get_user(ireg, (int __user *) arg)) {
+			error = -EFAULT;
+			break;
+		}
 		if (0 != sol_compat) {
 			(regs & D7S_FLIP) ? 
 				(ireg |= D7S_FLIP) : (ireg &= ~D7S_FLIP);
@@ -144,8 +148,10 @@
 		 * This driver will not misinform you about the state
 		 * of your hardware while in sol_compat mode
 		 */
-		if (put_user(regs, (int __user *) arg))
-			return -EFAULT;
+		if (put_user(regs, (int __user *) arg)) {
+			error = -EFAULT;
+			break;
+		}
 		break;
 
 	case D7SIOCTM:
@@ -155,15 +161,17 @@
 		writeb(regs, d7s_regs);
 		break;
 	};
+	unlock_kernel();
 
-	return 0;
+	return error;
 }
 
 static struct file_operations d7s_fops = {
-	.owner =	THIS_MODULE,
-	.ioctl =	d7s_ioctl,
-	.open =		d7s_open,
-	.release =	d7s_release,
+	.owner =		THIS_MODULE,
+	.unlocked_ioctl =	d7s_ioctl,
+	.compat_ioctl =		d7s_ioctl,
+	.open =			d7s_open,
+	.release =		d7s_release,
 };
 
 static struct miscdevice d7s_miscdev = { D7S_MINOR, D7S_DEVNAME, &d7s_fops };
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index ba56762..19e8edd 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -654,9 +654,8 @@
 /* Function Description: Command what to read.  Mapped to user ioctl().
  * Return: Gives 0 for implemented commands, -EINVAL otherwise.
  */
-static int
-envctrl_ioctl(struct inode *inode, struct file *file,
-	      unsigned int cmd, unsigned long arg)
+static long
+envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	char __user *infobuf;
 
@@ -715,11 +714,14 @@
 }
 
 static struct file_operations envctrl_fops = {
-	.owner =	THIS_MODULE,
-	.read =		envctrl_read,
-	.ioctl =	envctrl_ioctl,
-	.open =		envctrl_open,
-	.release =	envctrl_release,
+	.owner =		THIS_MODULE,
+	.read =			envctrl_read,
+	.unlocked_ioctl =	envctrl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl =		envctrl_ioctl,
+#endif
+	.open =			envctrl_open,
+	.release =		envctrl_release,
 };	
 
 static struct miscdevice envctrl_dev = {
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 58ed337..5028ac2 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -39,6 +39,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <asm/oplib.h>
@@ -565,6 +566,38 @@
 	}
 }
 
+static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	long rval = -ENOTTY;
+
+	/*
+	 * SunOS/Solaris only, the NetBSD ones have embedded pointers in
+	 * the arg which we'd need to clean up...
+	 */
+	switch (cmd) {
+	case OPROMGETOPT:
+	case OPROMSETOPT:
+	case OPROMNXTOPT:
+	case OPROMSETOPT2:
+	case OPROMNEXT:
+	case OPROMCHILD:
+	case OPROMGETPROP:
+	case OPROMNXTPROP:
+	case OPROMU2P:
+	case OPROMGETCONS:
+	case OPROMGETFBNAME:
+	case OPROMGETBOOTARGS:
+	case OPROMSETCUR:
+	case OPROMPCI2NODE:
+	case OPROMPATH2NODE:
+		lock_kernel();
+		rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg);
+		unlock_kernel();
+		break;
+	}
+
+	return rval;
+}
+
 static int openprom_open(struct inode * inode, struct file * file)
 {
 	DATA *data;
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 656c0e8..f073853 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1441,7 +1441,7 @@
  *	- initialize the serial port
  *	Return non-zero if we didn't find a serial port.
  */
-static int __init sunsu_console_setup(struct console *co, char *options)
+static int sunsu_console_setup(struct console *co, char *options)
 {
 	struct uart_port *port;
 	int baud = 9600;
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 3280bb9..414c440 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -653,12 +653,6 @@
 	sbus_writel(0, &fbc->clipminy);
 	sbus_writel(info->var.xres - 1, &fbc->clipmaxx);
 	sbus_writel(info->var.yres - 1, &fbc->clipmaxy);
-
-	/* Disable cursor in Brooktree DAC. */
-	sbus_writel(0x06 << 24, &par->bt->addr);
-	tmp = sbus_readl(&par->bt->control);
-	tmp &= ~(0x03 << 24);
-	sbus_writel(tmp, &par->bt->control);
 }
 
 struct all_info {
diff --git a/fs/Makefile b/fs/Makefile
index 1972da1..4c26557 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -10,7 +10,7 @@
 		ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
 		attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
 		seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
-		ioprio.o
+		ioprio.o pnode.o
 
 obj-$(CONFIG_INOTIFY)		+= inotify.o
 obj-$(CONFIG_EPOLL)		+= eventpoll.o
diff --git a/fs/dquot.c b/fs/dquot.c
index afa06a8..05b6028 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1321,13 +1321,11 @@
 	int cnt;
 	struct quota_info *dqopt = sb_dqopt(sb);
 	struct inode *toputinode[MAXQUOTAS];
-	struct vfsmount *toputmnt[MAXQUOTAS];
 
 	/* We need to serialize quota_off() for device */
 	down(&dqopt->dqonoff_sem);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		toputinode[cnt] = NULL;
-		toputmnt[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_enabled(sb, cnt))
@@ -1348,9 +1346,7 @@
 		put_quota_format(dqopt->info[cnt].dqi_format);
 
 		toputinode[cnt] = dqopt->files[cnt];
-		toputmnt[cnt] = dqopt->mnt[cnt];
 		dqopt->files[cnt] = NULL;
-		dqopt->mnt[cnt] = NULL;
 		dqopt->info[cnt].dqi_flags = 0;
 		dqopt->info[cnt].dqi_igrace = 0;
 		dqopt->info[cnt].dqi_bgrace = 0;
@@ -1358,10 +1354,7 @@
 	}
 	up(&dqopt->dqonoff_sem);
 	/* Sync the superblock so that buffers with quota data are written to
-	 * disk (and so userspace sees correct data afterwards).
-	 * The reference to vfsmnt we are still holding protects us from
-	 * umount (we don't have it only when quotas are turned on/off for
-	 * journal replay but in that case we are guarded by the fs anyway). */
+	 * disk (and so userspace sees correct data afterwards). */
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);
@@ -1385,10 +1378,6 @@
 				iput(toputinode[cnt]);
 			}
 			up(&dqopt->dqonoff_sem);
-			/* We don't hold the reference when we turned on quotas
-			 * just for the journal replay... */
-			if (toputmnt[cnt])
-				mntput(toputmnt[cnt]);
 		}
 	if (sb->s_bdev)
 		invalidate_bdev(sb->s_bdev, 0);
@@ -1503,11 +1492,8 @@
 	/* Quota file not on the same filesystem? */
 	if (nd.mnt->mnt_sb != sb)
 		error = -EXDEV;
-	else {
+	else
 		error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id);
-		if (!error)
-			sb_dqopt(sb)->mnt[type] = mntget(nd.mnt);
-	}
 out_path:
 	path_release(&nd);
 	return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index 2fa9fdf..caa9187 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -24,6 +24,7 @@
 #include <linux/mount.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include "pnode.h"
 
 extern int __init init_rootfs(void);
 
@@ -37,33 +38,39 @@
 #endif
 
 /* spinlock for vfsmount related operations, inplace of dcache_lock */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+
+static int event;
 
 static struct list_head *mount_hashtable;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache; 
+static kmem_cache_t *mnt_cache;
+static struct rw_semaphore namespace_sem;
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
-	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
-	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
+	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
+	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
 	tmp = tmp + (tmp >> hash_bits);
 	return tmp & hash_mask;
 }
 
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
-	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); 
+	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
 	if (mnt) {
 		memset(mnt, 0, sizeof(struct vfsmount));
-		atomic_set(&mnt->mnt_count,1);
+		atomic_set(&mnt->mnt_count, 1);
 		INIT_LIST_HEAD(&mnt->mnt_hash);
 		INIT_LIST_HEAD(&mnt->mnt_child);
 		INIT_LIST_HEAD(&mnt->mnt_mounts);
 		INIT_LIST_HEAD(&mnt->mnt_list);
 		INIT_LIST_HEAD(&mnt->mnt_expire);
+		INIT_LIST_HEAD(&mnt->mnt_share);
+		INIT_LIST_HEAD(&mnt->mnt_slave_list);
+		INIT_LIST_HEAD(&mnt->mnt_slave);
 		if (name) {
-			int size = strlen(name)+1;
+			int size = strlen(name) + 1;
 			char *newname = kmalloc(size, GFP_KERNEL);
 			if (newname) {
 				memcpy(newname, name, size);
@@ -81,36 +88,65 @@
 }
 
 /*
- * Now, lookup_mnt increments the ref count before returning
- * the vfsmount struct.
+ * find the first or last mount at @dentry on vfsmount @mnt depending on
+ * @dir. If @dir is set return the first mount else return the last mount.
  */
-struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
+			      int dir)
 {
-	struct list_head * head = mount_hashtable + hash(mnt, dentry);
-	struct list_head * tmp = head;
+	struct list_head *head = mount_hashtable + hash(mnt, dentry);
+	struct list_head *tmp = head;
 	struct vfsmount *p, *found = NULL;
 
-	spin_lock(&vfsmount_lock);
 	for (;;) {
-		tmp = tmp->next;
+		tmp = dir ? tmp->next : tmp->prev;
 		p = NULL;
 		if (tmp == head)
 			break;
 		p = list_entry(tmp, struct vfsmount, mnt_hash);
 		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
-			found = mntget(p);
+			found = p;
 			break;
 		}
 	}
-	spin_unlock(&vfsmount_lock);
 	return found;
 }
 
+/*
+ * lookup_mnt increments the ref count before returning
+ * the vfsmount struct.
+ */
+struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct vfsmount *child_mnt;
+	spin_lock(&vfsmount_lock);
+	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
+		mntget(child_mnt);
+	spin_unlock(&vfsmount_lock);
+	return child_mnt;
+}
+
 static inline int check_mnt(struct vfsmount *mnt)
 {
 	return mnt->mnt_namespace == current->namespace;
 }
 
+static void touch_namespace(struct namespace *ns)
+{
+	if (ns) {
+		ns->event = ++event;
+		wake_up_interruptible(&ns->poll);
+	}
+}
+
+static void __touch_namespace(struct namespace *ns)
+{
+	if (ns && ns->event != event) {
+		ns->event = event;
+		wake_up_interruptible(&ns->poll);
+	}
+}
+
 static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
 {
 	old_nd->dentry = mnt->mnt_mountpoint;
@@ -122,13 +158,43 @@
 	old_nd->dentry->d_mounted--;
 }
 
+void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
+			struct vfsmount *child_mnt)
+{
+	child_mnt->mnt_parent = mntget(mnt);
+	child_mnt->mnt_mountpoint = dget(dentry);
+	dentry->d_mounted++;
+}
+
 static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
 {
-	mnt->mnt_parent = mntget(nd->mnt);
-	mnt->mnt_mountpoint = dget(nd->dentry);
-	list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
+	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
+	list_add_tail(&mnt->mnt_hash, mount_hashtable +
+			hash(nd->mnt, nd->dentry));
 	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
-	nd->dentry->d_mounted++;
+}
+
+/*
+ * the caller must hold vfsmount_lock
+ */
+static void commit_tree(struct vfsmount *mnt)
+{
+	struct vfsmount *parent = mnt->mnt_parent;
+	struct vfsmount *m;
+	LIST_HEAD(head);
+	struct namespace *n = parent->mnt_namespace;
+
+	BUG_ON(parent == mnt);
+
+	list_add_tail(&head, &mnt->mnt_list);
+	list_for_each_entry(m, &head, mnt_list)
+		m->mnt_namespace = n;
+	list_splice(&head, n->list.prev);
+
+	list_add_tail(&mnt->mnt_hash, mount_hashtable +
+				hash(parent, mnt->mnt_mountpoint));
+	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	touch_namespace(n);
 }
 
 static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
@@ -147,8 +213,18 @@
 	return list_entry(next, struct vfsmount, mnt_child);
 }
 
-static struct vfsmount *
-clone_mnt(struct vfsmount *old, struct dentry *root)
+static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
+{
+	struct list_head *prev = p->mnt_mounts.prev;
+	while (prev != &p->mnt_mounts) {
+		p = list_entry(prev, struct vfsmount, mnt_child);
+		prev = p->mnt_mounts.prev;
+	}
+	return p;
+}
+
+static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
+					int flag)
 {
 	struct super_block *sb = old->mnt_sb;
 	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
@@ -160,19 +236,34 @@
 		mnt->mnt_root = dget(root);
 		mnt->mnt_mountpoint = mnt->mnt_root;
 		mnt->mnt_parent = mnt;
-		mnt->mnt_namespace = current->namespace;
+
+		if (flag & CL_SLAVE) {
+			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+			mnt->mnt_master = old;
+			CLEAR_MNT_SHARED(mnt);
+		} else {
+			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+				list_add(&mnt->mnt_share, &old->mnt_share);
+			if (IS_MNT_SLAVE(old))
+				list_add(&mnt->mnt_slave, &old->mnt_slave);
+			mnt->mnt_master = old->mnt_master;
+		}
+		if (flag & CL_MAKE_SHARED)
+			set_mnt_shared(mnt);
 
 		/* stick the duplicate mount on the same expiry list
 		 * as the original if that was on one */
-		spin_lock(&vfsmount_lock);
-		if (!list_empty(&old->mnt_expire))
-			list_add(&mnt->mnt_expire, &old->mnt_expire);
-		spin_unlock(&vfsmount_lock);
+		if (flag & CL_EXPIRE) {
+			spin_lock(&vfsmount_lock);
+			if (!list_empty(&old->mnt_expire))
+				list_add(&mnt->mnt_expire, &old->mnt_expire);
+			spin_unlock(&vfsmount_lock);
+		}
 	}
 	return mnt;
 }
 
-void __mntput(struct vfsmount *mnt)
+static inline void __mntput(struct vfsmount *mnt)
 {
 	struct super_block *sb = mnt->mnt_sb;
 	dput(mnt->mnt_root);
@@ -180,7 +271,46 @@
 	deactivate_super(sb);
 }
 
-EXPORT_SYMBOL(__mntput);
+void mntput_no_expire(struct vfsmount *mnt)
+{
+repeat:
+	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
+		if (likely(!mnt->mnt_pinned)) {
+			spin_unlock(&vfsmount_lock);
+			__mntput(mnt);
+			return;
+		}
+		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+		mnt->mnt_pinned = 0;
+		spin_unlock(&vfsmount_lock);
+		acct_auto_close_mnt(mnt);
+		security_sb_umount_close(mnt);
+		goto repeat;
+	}
+}
+
+EXPORT_SYMBOL(mntput_no_expire);
+
+void mnt_pin(struct vfsmount *mnt)
+{
+	spin_lock(&vfsmount_lock);
+	mnt->mnt_pinned++;
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_pin);
+
+void mnt_unpin(struct vfsmount *mnt)
+{
+	spin_lock(&vfsmount_lock);
+	if (mnt->mnt_pinned) {
+		atomic_inc(&mnt->mnt_count);
+		mnt->mnt_pinned--;
+	}
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_unpin);
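
The pin helpers let a subsystem remember a vfsmount without holding a
reference that would make a later umount report -EBUSY: mntput_no_expire()
converts any outstanding pins back into real references and calls
acct_auto_close_mnt()/security_sb_umount_close() so the pinning code can
drop them.  A rough sketch of the intended usage pattern (the names here
are illustrative, not from this patch; the in-tree user is the process
accounting code):

	#include <linux/mount.h>

	static struct vfsmount *pinned_mnt;	/* illustrative only */

	static void example_remember(struct vfsmount *mnt)
	{
		pinned_mnt = mnt;
		mnt_pin(mnt);	/* park a pin instead of a mnt_count reference */
		mntput(mnt);	/* drop the reference we were handed */
	}

	/* called back (e.g. from the umount-close hooks) to let go */
	static void example_forget(void)
	{
		if (!pinned_mnt)
			return;
		mnt_unpin(pinned_mnt);	/* turn the pin back into a reference */
		mntput(pinned_mnt);	/* ...and release it */
		pinned_mnt = NULL;
	}
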
 
 /* iterator */
 static void *m_start(struct seq_file *m, loff_t *pos)
@@ -189,7 +319,7 @@
 	struct list_head *p;
 	loff_t l = *pos;
 
-	down_read(&n->sem);
+	down_read(&namespace_sem);
 	list_for_each(p, &n->list)
 		if (!l--)
 			return list_entry(p, struct vfsmount, mnt_list);
@@ -201,13 +331,12 @@
 	struct namespace *n = m->private;
 	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
 	(*pos)++;
-	return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
 }
 
 static void m_stop(struct seq_file *m, void *v)
 {
-	struct namespace *n = m->private;
-	up_read(&n->sem);
+	up_read(&namespace_sem);
 }
 
 static inline void mangle(struct seq_file *m, const char *s)
@@ -275,35 +404,14 @@
  */
 int may_umount_tree(struct vfsmount *mnt)
 {
-	struct list_head *next;
-	struct vfsmount *this_parent = mnt;
-	int actual_refs;
-	int minimum_refs;
+	int actual_refs = 0;
+	int minimum_refs = 0;
+	struct vfsmount *p;
 
 	spin_lock(&vfsmount_lock);
-	actual_refs = atomic_read(&mnt->mnt_count);
-	minimum_refs = 2;
-repeat:
-	next = this_parent->mnt_mounts.next;
-resume:
-	while (next != &this_parent->mnt_mounts) {
-		struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);
-
-		next = next->next;
-
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += atomic_read(&p->mnt_count);
 		minimum_refs += 2;
-
-		if (!list_empty(&p->mnt_mounts)) {
-			this_parent = p;
-			goto repeat;
-		}
-	}
-
-	if (this_parent != mnt) {
-		next = this_parent->mnt_child.next;
-		this_parent = this_parent->mnt_parent;
-		goto resume;
 	}
 	spin_unlock(&vfsmount_lock);
 
@@ -330,45 +438,67 @@
  */
 int may_umount(struct vfsmount *mnt)
 {
-	if (atomic_read(&mnt->mnt_count) > 2)
-		return -EBUSY;
-	return 0;
+	int ret = 0;
+	spin_lock(&vfsmount_lock);
+	if (propagate_mount_busy(mnt, 2))
+		ret = -EBUSY;
+	spin_unlock(&vfsmount_lock);
+	return ret;
 }
 
 EXPORT_SYMBOL(may_umount);
 
-static void umount_tree(struct vfsmount *mnt)
+void release_mounts(struct list_head *head)
 {
-	struct vfsmount *p;
-	LIST_HEAD(kill);
-
-	for (p = mnt; p; p = next_mnt(p, mnt)) {
-		list_del(&p->mnt_list);
-		list_add(&p->mnt_list, &kill);
-		p->mnt_namespace = NULL;
-	}
-
-	while (!list_empty(&kill)) {
-		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
-		list_del_init(&mnt->mnt_list);
-		list_del_init(&mnt->mnt_expire);
-		if (mnt->mnt_parent == mnt) {
+	struct vfsmount *mnt;
+	while (!list_empty(head)) {
+		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
+		list_del_init(&mnt->mnt_hash);
+		if (mnt->mnt_parent != mnt) {
+			struct dentry *dentry;
+			struct vfsmount *m;
+			spin_lock(&vfsmount_lock);
+			dentry = mnt->mnt_mountpoint;
+			m = mnt->mnt_parent;
+			mnt->mnt_mountpoint = mnt->mnt_root;
+			mnt->mnt_parent = mnt;
 			spin_unlock(&vfsmount_lock);
-		} else {
-			struct nameidata old_nd;
-			detach_mnt(mnt, &old_nd);
-			spin_unlock(&vfsmount_lock);
-			path_release(&old_nd);
+			dput(dentry);
+			mntput(m);
 		}
 		mntput(mnt);
-		spin_lock(&vfsmount_lock);
+	}
+}
+
+void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+{
+	struct vfsmount *p;
+
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		list_del(&p->mnt_hash);
+		list_add(&p->mnt_hash, kill);
+	}
+
+	if (propagate)
+		propagate_umount(kill);
+
+	list_for_each_entry(p, kill, mnt_hash) {
+		list_del_init(&p->mnt_expire);
+		list_del_init(&p->mnt_list);
+		__touch_namespace(p->mnt_namespace);
+		p->mnt_namespace = NULL;
+		list_del_init(&p->mnt_child);
+		if (p->mnt_parent != p)
+			p->mnt_mountpoint->d_mounted--;
+		change_mnt_propagation(p, MS_PRIVATE);
 	}
 }
 
 static int do_umount(struct vfsmount *mnt, int flags)
 {
-	struct super_block * sb = mnt->mnt_sb;
+	struct super_block *sb = mnt->mnt_sb;
 	int retval;
+	LIST_HEAD(umount_list);
 
 	retval = security_sb_umount(mnt, flags);
 	if (retval)
@@ -403,7 +533,7 @@
 	 */
 
 	lock_kernel();
-	if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
+	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
 		sb->s_op->umount_begin(sb);
 	unlock_kernel();
 
@@ -432,29 +562,21 @@
 		return retval;
 	}
 
-	down_write(&current->namespace->sem);
+	down_write(&namespace_sem);
 	spin_lock(&vfsmount_lock);
+	event++;
 
-	if (atomic_read(&sb->s_active) == 1) {
-		/* last instance - try to be smart */
-		spin_unlock(&vfsmount_lock);
-		lock_kernel();
-		DQUOT_OFF(sb);
-		acct_auto_close(sb);
-		unlock_kernel();
-		security_sb_umount_close(mnt);
-		spin_lock(&vfsmount_lock);
-	}
 	retval = -EBUSY;
-	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
+	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
 		if (!list_empty(&mnt->mnt_list))
-			umount_tree(mnt);
+			umount_tree(mnt, 1, &umount_list);
 		retval = 0;
 	}
 	spin_unlock(&vfsmount_lock);
 	if (retval)
 		security_sb_umount_busy(mnt);
-	up_write(&current->namespace->sem);
+	up_write(&namespace_sem);
+	release_mounts(&umount_list);
 	return retval;
 }
 
@@ -494,12 +616,11 @@
 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
 
 /*
- *	The 2.0 compatible umount. No flags. 
+ *	The 2.0 compatible umount. No flags.
  */
- 
 asmlinkage long sys_oldumount(char __user * name)
 {
-	return sys_umount(name,0);
+	return sys_umount(name, 0);
 }
 
 #endif
@@ -522,8 +643,7 @@
 #endif
 }
 
-static int
-lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
+static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
 {
 	while (1) {
 		if (d == dentry)
@@ -534,12 +654,16 @@
 	}
 }
 
-static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
+struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
+					int flag)
 {
 	struct vfsmount *res, *p, *q, *r, *s;
 	struct nameidata nd;
 
-	res = q = clone_mnt(mnt, dentry);
+	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
+		return NULL;
+
+	res = q = clone_mnt(mnt, dentry, flag);
 	if (!q)
 		goto Enomem;
 	q->mnt_mountpoint = mnt->mnt_mountpoint;
@@ -550,6 +674,10 @@
 			continue;
 
 		for (s = r; s; s = next_mnt(s, r)) {
+			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
+				s = skip_mnt_tree(s);
+				continue;
+			}
 			while (p != s->mnt_parent) {
 				p = p->mnt_parent;
 				q = q->mnt_parent;
@@ -557,7 +685,7 @@
 			p = s;
 			nd.mnt = q;
 			nd.dentry = p->mnt_mountpoint;
-			q = clone_mnt(p, p->mnt_root);
+			q = clone_mnt(p, p->mnt_root, flag);
 			if (!q)
 				goto Enomem;
 			spin_lock(&vfsmount_lock);
@@ -567,15 +695,114 @@
 		}
 	}
 	return res;
- Enomem:
+Enomem:
 	if (res) {
+		LIST_HEAD(umount_list);
 		spin_lock(&vfsmount_lock);
-		umount_tree(res);
+		umount_tree(res, 0, &umount_list);
 		spin_unlock(&vfsmount_lock);
+		release_mounts(&umount_list);
 	}
 	return NULL;
 }
 
+/*
+ *  @source_mnt : mount tree to be attached
+ *  @nd         : place the mount tree @source_mnt is attached
+ *  @parent_nd  : if non-null, detach the source_mnt from its parent and
+ *  		   store the parent mount and mountpoint dentry.
+ *  		   (done when source_mnt is moved)
+ *
+ *  NOTE: the tables below explain the semantics when a source mount
+ *  of a given type is attached to a destination mount of a given type.
+ * ---------------------------------------------------------------------------
+ * |         BIND MOUNT OPERATION                                            |
+ * |**************************************************************************
+ * | source-->| shared        |       private  |       slave    | unbindable |
+ * | dest     |               |                |                |            |
+ * |   |      |               |                |                |            |
+ * |   v      |               |                |                |            |
+ * |**************************************************************************
+ * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
+ * |          |               |                |                |            |
+ * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
+ * ***************************************************************************
+ * A bind operation clones the source mount and mounts the clone on the
+ * destination mount.
+ *
+ * (++)  the cloned mount is propagated to all the mounts in the propagation
+ * 	 tree of the destination mount and the cloned mount is added to
+ * 	 the peer group of the source mount.
+ * (+)   the cloned mount is created under the destination mount and is marked
+ *       as shared. The cloned mount is added to the peer group of the source
+ *       mount.
+ * (+++) the mount is propagated to all the mounts in the propagation tree
+ *       of the destination mount and the cloned mount is made a slave
+ *       of the same master as that of the source mount. The cloned mount
+ *       is marked as 'shared and slave'.
+ * (*)   the cloned mount is made a slave of the same master as that of the
+ * 	 source mount.
+ *
+ * ---------------------------------------------------------------------------
+ * |         		MOVE MOUNT OPERATION                                 |
+ * |**************************************************************************
+ * | source-->| shared        |       private  |       slave    | unbindable |
+ * | dest     |               |                |                |            |
+ * |   |      |               |                |                |            |
+ * |   v      |               |                |                |            |
+ * |**************************************************************************
+ * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
+ * |          |               |                |                |            |
+ * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
+ * ***************************************************************************
+ *
+ * (+)  the mount is moved to the destination, and is then propagated to
+ * 	all the mounts in the propagation tree of the destination mount.
+ * (+*)  the mount is moved to the destination.
+ * (+++)  the mount is moved to the destination and is then propagated to
+ * 	all the mounts belonging to the destination mount's propagation tree.
+ * 	the mount is marked as 'shared and slave'.
+ * (*)	the mount continues to be a slave at the new location.
+ *
+ * if the source mount is a tree, the operations explained above are
+ * applied to each mount in the tree.
+ * Must be called without spinlocks held, since this function can sleep
+ * in allocations.
+ */
+static int attach_recursive_mnt(struct vfsmount *source_mnt,
+			struct nameidata *nd, struct nameidata *parent_nd)
+{
+	LIST_HEAD(tree_list);
+	struct vfsmount *dest_mnt = nd->mnt;
+	struct dentry *dest_dentry = nd->dentry;
+	struct vfsmount *child, *p;
+
+	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
+		return -EINVAL;
+
+	if (IS_MNT_SHARED(dest_mnt)) {
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
+			set_mnt_shared(p);
+	}
+
+	spin_lock(&vfsmount_lock);
+	if (parent_nd) {
+		detach_mnt(source_mnt, parent_nd);
+		attach_mnt(source_mnt, nd);
+		touch_namespace(current->namespace);
+	} else {
+		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
+		commit_tree(source_mnt);
+	}
+
+	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
+		list_del_init(&child->mnt_hash);
+		commit_tree(child);
+	}
+	spin_unlock(&vfsmount_lock);
+	return 0;
+}
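
To make the first table concrete, here is a minimal userspace sketch (not
part of this patch; the paths, the assumption that /srv/data is already a
mountpoint, and the fallback flag value are all illustrative) that marks a
mount shared and then bind mounts it, so that a mount made later under
either location shows up under both:

	#include <stdio.h>
	#include <sys/mount.h>

	#ifndef MS_SHARED
	#define MS_SHARED	(1 << 20)	/* mark a mount as shared */
	#endif
	#ifndef MS_BIND
	#define MS_BIND		4096
	#endif

	int main(void)
	{
		/* /srv/data is assumed to already be a mountpoint */
		if (mount("none", "/srv/data", NULL, MS_SHARED, NULL))
			perror("make-shared /srv/data");

		/*
		 * shared source on a non-shared destination: per the table the
		 * clone at /mnt/mirror joins /srv/data's peer group, so a later
		 * mount under /srv/data/x also becomes visible at /mnt/mirror/x.
		 */
		if (mount("/srv/data", "/mnt/mirror", NULL, MS_BIND, NULL))
			perror("bind /srv/data -> /mnt/mirror");
		return 0;
	}
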
+
 static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
 {
 	int err;
@@ -596,17 +823,8 @@
 		goto out_unlock;
 
 	err = -ENOENT;
-	spin_lock(&vfsmount_lock);
-	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
-		struct list_head head;
-
-		attach_mnt(mnt, nd);
-		list_add_tail(&head, &mnt->mnt_list);
-		list_splice(&head, current->namespace->list.prev);
-		mntget(mnt);
-		err = 0;
-	}
-	spin_unlock(&vfsmount_lock);
+	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
+		err = attach_recursive_mnt(mnt, nd, NULL);
 out_unlock:
 	up(&nd->dentry->d_inode->i_sem);
 	if (!err)
@@ -615,6 +833,27 @@
 }
 
 /*
+ * recursively change the type of the mountpoint.
+ */
+static int do_change_type(struct nameidata *nd, int flag)
+{
+	struct vfsmount *m, *mnt = nd->mnt;
+	int recurse = flag & MS_REC;
+	int type = flag & ~MS_REC;
+
+	if (nd->dentry != nd->mnt->mnt_root)
+		return -EINVAL;
+
+	down_write(&namespace_sem);
+	spin_lock(&vfsmount_lock);
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
+		change_mnt_propagation(m, type);
+	spin_unlock(&vfsmount_lock);
+	up_write(&namespace_sem);
+	return 0;
+}
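
The propagation type itself is requested from userspace through mount(2):
the source and filesystem type are ignored, the target must be the root of
a mount (otherwise do_change_type() returns -EINVAL), and exactly one of
MS_SHARED, MS_SLAVE, MS_PRIVATE or MS_UNBINDABLE is passed, optionally ORed
with MS_REC so the loop above walks the whole subtree.  A hedged sketch of
such a helper (the fallback flag values mirror what the kernel uses at this
point; the program itself is illustrative):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mount.h>

	#ifndef MS_UNBINDABLE
	#define MS_UNBINDABLE	(1 << 17)
	#define MS_PRIVATE	(1 << 18)
	#define MS_SLAVE	(1 << 19)
	#define MS_SHARED	(1 << 20)
	#endif
	#ifndef MS_REC
	#define MS_REC		0x4000
	#endif

	/* e.g. change_type("/mnt", MS_SHARED, 1) makes /mnt shared, recursively */
	static int change_type(const char *path, unsigned long type, int recurse)
	{
		/* source and fstype are ignored for a propagation change */
		return mount("none", path, NULL, type | (recurse ? MS_REC : 0), NULL);
	}

	int main(int argc, char **argv)
	{
		unsigned long type;

		if (argc != 3)
			return 1;
		if (!strcmp(argv[2], "shared"))
			type = MS_SHARED;
		else if (!strcmp(argv[2], "slave"))
			type = MS_SLAVE;
		else if (!strcmp(argv[2], "unbindable"))
			type = MS_UNBINDABLE;
		else
			type = MS_PRIVATE;
		if (change_type(argv[1], type, 0)) {
			perror("mount");
			return 1;
		}
		return 0;
	}
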
+
+/*
  * do loopback mount.
  */
 static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
@@ -630,32 +869,34 @@
 	if (err)
 		return err;
 
-	down_write(&current->namespace->sem);
+	down_write(&namespace_sem);
 	err = -EINVAL;
-	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
-		err = -ENOMEM;
-		if (recurse)
-			mnt = copy_tree(old_nd.mnt, old_nd.dentry);
-		else
-			mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
-	}
+	if (IS_MNT_UNBINDABLE(old_nd.mnt))
+ 		goto out;
 
-	if (mnt) {
-		/* stop bind mounts from expiring */
+	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+		goto out;
+
+	err = -ENOMEM;
+	if (recurse)
+		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
+	else
+		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);
+
+	if (!mnt)
+		goto out;
+
+	err = graft_tree(mnt, nd);
+	if (err) {
+		LIST_HEAD(umount_list);
 		spin_lock(&vfsmount_lock);
-		list_del_init(&mnt->mnt_expire);
+		umount_tree(mnt, 0, &umount_list);
 		spin_unlock(&vfsmount_lock);
-
-		err = graft_tree(mnt, nd);
-		if (err) {
-			spin_lock(&vfsmount_lock);
-			umount_tree(mnt);
-			spin_unlock(&vfsmount_lock);
-		} else
-			mntput(mnt);
+		release_mounts(&umount_list);
 	}
 
-	up_write(&current->namespace->sem);
+out:
+	up_write(&namespace_sem);
 	path_release(&old_nd);
 	return err;
 }
@@ -665,12 +906,11 @@
  * If you've mounted a non-root directory somewhere and want to do remount
  * on it - tough luck.
  */
-
 static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
 		      void *data)
 {
 	int err;
-	struct super_block * sb = nd->mnt->mnt_sb;
+	struct super_block *sb = nd->mnt->mnt_sb;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -684,13 +924,23 @@
 	down_write(&sb->s_umount);
 	err = do_remount_sb(sb, flags, data, 0);
 	if (!err)
-		nd->mnt->mnt_flags=mnt_flags;
+		nd->mnt->mnt_flags = mnt_flags;
 	up_write(&sb->s_umount);
 	if (!err)
 		security_sb_post_remount(nd->mnt, flags, data);
 	return err;
 }
 
+static inline int tree_contains_unbindable(struct vfsmount *mnt)
+{
+	struct vfsmount *p;
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		if (IS_MNT_UNBINDABLE(p))
+			return 1;
+	}
+	return 0;
+}
+
 static int do_move_mount(struct nameidata *nd, char *old_name)
 {
 	struct nameidata old_nd, parent_nd;
@@ -704,8 +954,8 @@
 	if (err)
 		return err;
 
-	down_write(&current->namespace->sem);
-	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+	down_write(&namespace_sem);
+	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
 		;
 	err = -EINVAL;
 	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
@@ -716,39 +966,47 @@
 	if (IS_DEADDIR(nd->dentry->d_inode))
 		goto out1;
 
-	spin_lock(&vfsmount_lock);
 	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
-		goto out2;
+		goto out1;
 
 	err = -EINVAL;
 	if (old_nd.dentry != old_nd.mnt->mnt_root)
-		goto out2;
+		goto out1;
 
 	if (old_nd.mnt == old_nd.mnt->mnt_parent)
-		goto out2;
+		goto out1;
 
 	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
 	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
-		goto out2;
-
+		goto out1;
+	/*
+	 * Don't move a mount residing in a shared parent.
+	 */
+	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
+		goto out1;
+	/*
+	 * Don't move a mount tree containing unbindable mounts to a destination
+	 * mount which is shared.
+	 */
+	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
+		goto out1;
 	err = -ELOOP;
-	for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent)
+	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
 		if (p == old_nd.mnt)
-			goto out2;
-	err = 0;
+			goto out1;
 
-	detach_mnt(old_nd.mnt, &parent_nd);
-	attach_mnt(old_nd.mnt, nd);
+	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
+		goto out1;
 
+	spin_lock(&vfsmount_lock);
 	/* if the mount is moved, it should no longer be expire
 	 * automatically */
 	list_del_init(&old_nd.mnt->mnt_expire);
-out2:
 	spin_unlock(&vfsmount_lock);
 out1:
 	up(&nd->dentry->d_inode->i_sem);
 out:
-	up_write(&current->namespace->sem);
+	up_write(&namespace_sem);
 	if (!err)
 		path_release(&parent_nd);
 	path_release(&old_nd);
@@ -787,9 +1045,9 @@
 {
 	int err;
 
-	down_write(&current->namespace->sem);
+	down_write(&namespace_sem);
 	/* Something was mounted here while we slept */
-	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
 		;
 	err = -EINVAL;
 	if (!check_mnt(nd->mnt))
@@ -806,25 +1064,28 @@
 		goto unlock;
 
 	newmnt->mnt_flags = mnt_flags;
-	newmnt->mnt_namespace = current->namespace;
-	err = graft_tree(newmnt, nd);
+	if ((err = graft_tree(newmnt, nd)))
+		goto unlock;
 
-	if (err == 0 && fslist) {
+	if (fslist) {
 		/* add to the specified expiration list */
 		spin_lock(&vfsmount_lock);
 		list_add_tail(&newmnt->mnt_expire, fslist);
 		spin_unlock(&vfsmount_lock);
 	}
+	up_write(&namespace_sem);
+	return 0;
 
 unlock:
-	up_write(&current->namespace->sem);
+	up_write(&namespace_sem);
 	mntput(newmnt);
 	return err;
 }
 
 EXPORT_SYMBOL_GPL(do_add_mount);
 
-static void expire_mount(struct vfsmount *mnt, struct list_head *mounts)
+static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
+				struct list_head *umounts)
 {
 	spin_lock(&vfsmount_lock);
 
@@ -841,27 +1102,13 @@
 	 * Check that it is still dead: the count should now be 2 - as
 	 * contributed by the vfsmount parent and the mntget above
 	 */
-	if (atomic_read(&mnt->mnt_count) == 2) {
-		struct nameidata old_nd;
-
+	if (!propagate_mount_busy(mnt, 2)) {
 		/* delete from the namespace */
+		touch_namespace(mnt->mnt_namespace);
 		list_del_init(&mnt->mnt_list);
 		mnt->mnt_namespace = NULL;
-		detach_mnt(mnt, &old_nd);
+		umount_tree(mnt, 1, umounts);
 		spin_unlock(&vfsmount_lock);
-		path_release(&old_nd);
-
-		/*
-		 * Now lay it to rest if this was the last ref on the superblock
-		 */
-		if (atomic_read(&mnt->mnt_sb->s_active) == 1) {
-			/* last instance - try to be smart */
-			lock_kernel();
-			DQUOT_OFF(mnt->mnt_sb);
-			acct_auto_close(mnt->mnt_sb);
-			unlock_kernel();
-		}
-		mntput(mnt);
 	} else {
 		/*
 		 * Someone brought it back to life whilst we didn't have any
@@ -910,6 +1157,7 @@
 	 * - dispose of the corpse
 	 */
 	while (!list_empty(&graveyard)) {
+		LIST_HEAD(umounts);
 		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
 		list_del_init(&mnt->mnt_expire);
 
@@ -921,13 +1169,12 @@
 		get_namespace(namespace);
 
 		spin_unlock(&vfsmount_lock);
-		down_write(&namespace->sem);
-		expire_mount(mnt, mounts);
-		up_write(&namespace->sem);
-
+		down_write(&namespace_sem);
+		expire_mount(mnt, mounts, &umounts);
+		up_write(&namespace_sem);
+		release_mounts(&umounts);
 		mntput(mnt);
 		put_namespace(namespace);
-
 		spin_lock(&vfsmount_lock);
 	}
 
@@ -942,8 +1189,8 @@
  * Note that this function differs from copy_from_user() in that it will oops
  * on bad values of `to', rather than returning a short copy.
  */
-static long
-exact_copy_from_user(void *to, const void __user *from, unsigned long n)
+static long exact_copy_from_user(void *to, const void __user * from,
+				 unsigned long n)
 {
 	char *t = to;
 	const char __user *f = from;
@@ -964,12 +1211,12 @@
 	return n;
 }
 
-int copy_mount_options(const void __user *data, unsigned long *where)
+int copy_mount_options(const void __user * data, unsigned long *where)
 {
 	int i;
 	unsigned long page;
 	unsigned long size;
-	
+
 	*where = 0;
 	if (!data)
 		return 0;
@@ -988,7 +1235,7 @@
 
 	i = size - exact_copy_from_user((void *)page, data, size);
 	if (!i) {
-		free_page(page); 
+		free_page(page);
 		return -EFAULT;
 	}
 	if (i != PAGE_SIZE)
@@ -1011,7 +1258,7 @@
  * Therefore, if this magic number is present, it carries no information
  * and must be discarded.
  */
-long do_mount(char * dev_name, char * dir_name, char *type_page,
+long do_mount(char *dev_name, char *dir_name, char *type_page,
 		  unsigned long flags, void *data_page)
 {
 	struct nameidata nd;
@@ -1039,7 +1286,7 @@
 		mnt_flags |= MNT_NODEV;
 	if (flags & MS_NOEXEC)
 		mnt_flags |= MNT_NOEXEC;
-	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
+	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
 
 	/* ... and get the mountpoint */
 	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
@@ -1055,6 +1302,8 @@
 				    data_page);
 	else if (flags & MS_BIND)
 		retval = do_loopback(&nd, dev_name, flags & MS_REC);
+	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+		retval = do_change_type(&nd, flags);
 	else if (flags & MS_MOVE)
 		retval = do_move_mount(&nd, dev_name);
 	else
@@ -1091,14 +1340,16 @@
 		goto out;
 
 	atomic_set(&new_ns->count, 1);
-	init_rwsem(&new_ns->sem);
 	INIT_LIST_HEAD(&new_ns->list);
+	init_waitqueue_head(&new_ns->poll);
+	new_ns->event = 0;
 
-	down_write(&tsk->namespace->sem);
+	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
+	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
+					CL_COPY_ALL | CL_EXPIRE);
 	if (!new_ns->root) {
-		up_write(&tsk->namespace->sem);
+		up_write(&namespace_sem);
 		kfree(new_ns);
 		goto out;
 	}
@@ -1132,7 +1383,7 @@
 		p = next_mnt(p, namespace->root);
 		q = next_mnt(q, new_ns->root);
 	}
-	up_write(&tsk->namespace->sem);
+	up_write(&namespace_sem);
 
 	tsk->namespace = new_ns;
 
@@ -1161,7 +1412,7 @@
 	unsigned long dev_page;
 	char *dir_page;
 
-	retval = copy_mount_options (type, &type_page);
+	retval = copy_mount_options(type, &type_page);
 	if (retval < 0)
 		return retval;
 
@@ -1170,17 +1421,17 @@
 	if (IS_ERR(dir_page))
 		goto out1;
 
-	retval = copy_mount_options (dev_name, &dev_page);
+	retval = copy_mount_options(dev_name, &dev_page);
 	if (retval < 0)
 		goto out2;
 
-	retval = copy_mount_options (data, &data_page);
+	retval = copy_mount_options(data, &data_page);
 	if (retval < 0)
 		goto out3;
 
 	lock_kernel();
-	retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
-			  flags, (void*)data_page);
+	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
+			  flags, (void *)data_page);
 	unlock_kernel();
 	free_page(data_page);
 
@@ -1249,9 +1500,11 @@
 		if (fs) {
 			atomic_inc(&fs->count);
 			task_unlock(p);
-			if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt)
+			if (fs->root == old_nd->dentry
+			    && fs->rootmnt == old_nd->mnt)
 				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
-			if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt)
+			if (fs->pwd == old_nd->dentry
+			    && fs->pwdmnt == old_nd->mnt)
 				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
 			put_fs_struct(fs);
 		} else
@@ -1281,8 +1534,8 @@
  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
  *    first.
  */
-
-asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
+asmlinkage long sys_pivot_root(const char __user * new_root,
+			       const char __user * put_old)
 {
 	struct vfsmount *tmp;
 	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
@@ -1293,14 +1546,15 @@
 
 	lock_kernel();
 
-	error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
+	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+			    &new_nd);
 	if (error)
 		goto out0;
 	error = -EINVAL;
 	if (!check_mnt(new_nd.mnt))
 		goto out1;
 
-	error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
+	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
 	if (error)
 		goto out1;
 
@@ -1314,9 +1568,13 @@
 	user_nd.mnt = mntget(current->fs->rootmnt);
 	user_nd.dentry = dget(current->fs->root);
 	read_unlock(&current->fs->lock);
-	down_write(&current->namespace->sem);
+	down_write(&namespace_sem);
 	down(&old_nd.dentry->d_inode->i_sem);
 	error = -EINVAL;
+	if (IS_MNT_SHARED(old_nd.mnt) ||
+		IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
+		IS_MNT_SHARED(user_nd.mnt->mnt_parent))
+		goto out2;
 	if (!check_mnt(user_nd.mnt))
 		goto out2;
 	error = -ENOENT;
@@ -1356,6 +1614,7 @@
 	detach_mnt(user_nd.mnt, &root_parent);
 	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
 	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+	touch_namespace(current->namespace);
 	spin_unlock(&vfsmount_lock);
 	chroot_fs_refs(&user_nd, &new_nd);
 	security_sb_post_pivotroot(&user_nd, &new_nd);
@@ -1364,7 +1623,7 @@
 	path_release(&parent_nd);
 out2:
 	up(&old_nd.dentry->d_inode->i_sem);
-	up_write(&current->namespace->sem);
+	up_write(&namespace_sem);
 	path_release(&user_nd);
 	path_release(&old_nd);
 out1:
@@ -1391,7 +1650,8 @@
 		panic("Can't allocate initial namespace");
 	atomic_set(&namespace->count, 1);
 	INIT_LIST_HEAD(&namespace->list);
-	init_rwsem(&namespace->sem);
+	init_waitqueue_head(&namespace->poll);
+	namespace->event = 0;
 	list_add(&mnt->mnt_list, &namespace->list);
 	namespace->root = mnt;
 	mnt->mnt_namespace = namespace;
@@ -1414,11 +1674,12 @@
 	unsigned int nr_hash;
 	int i;
 
-	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
-			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+	init_rwsem(&namespace_sem);
 
-	mount_hashtable = (struct list_head *)
-		__get_free_page(GFP_ATOMIC);
+	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+
+	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
 
 	if (!mount_hashtable)
 		panic("Failed to allocate mount hash table\n");
@@ -1440,7 +1701,7 @@
 	 * from the number of bits we can fit.
 	 */
 	nr_hash = 1UL << hash_bits;
-	hash_mask = nr_hash-1;
+	hash_mask = nr_hash - 1;
 
 	printk("Mount-cache hash table entries: %d\n", nr_hash);
 
@@ -1460,12 +1721,14 @@
 void __put_namespace(struct namespace *namespace)
 {
 	struct vfsmount *root = namespace->root;
+	LIST_HEAD(umount_list);
 	namespace->root = NULL;
 	spin_unlock(&vfsmount_lock);
-	down_write(&namespace->sem);
+	down_write(&namespace_sem);
 	spin_lock(&vfsmount_lock);
-	umount_tree(root);
+	umount_tree(root, 0, &umount_list);
 	spin_unlock(&vfsmount_lock);
-	up_write(&namespace->sem);
+	up_write(&namespace_sem);
+	release_mounts(&umount_list);
 	kfree(namespace);
 }
diff --git a/fs/pnode.c b/fs/pnode.c
new file mode 100644
index 0000000..aeeec8b
--- /dev/null
+++ b/fs/pnode.c
@@ -0,0 +1,305 @@
+/*
+ *  linux/fs/pnode.c
+ *
+ * (C) Copyright IBM Corporation 2005.
+ *	Released under GPL v2.
+ *	Author : Ram Pai (linuxram@us.ibm.com)
+ *
+ */
+#include <linux/namespace.h>
+#include <linux/mount.h>
+#include <linux/fs.h>
+#include "pnode.h"
+
+/* return the next shared peer mount of @p */
+static inline struct vfsmount *next_peer(struct vfsmount *p)
+{
+	return list_entry(p->mnt_share.next, struct vfsmount, mnt_share);
+}
+
+static inline struct vfsmount *first_slave(struct vfsmount *p)
+{
+	return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave);
+}
+
+static inline struct vfsmount *next_slave(struct vfsmount *p)
+{
+	return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
+}
+
+static int do_make_slave(struct vfsmount *mnt)
+{
+	struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
+	struct vfsmount *slave_mnt;
+
+	/*
+	 * slave 'mnt' to a peer mount that has the
+	 * same root dentry. If none is available then
+	 * slave it to anything that is available.
+	 */
+	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
+	       peer_mnt->mnt_root != mnt->mnt_root) ;
+
+	if (peer_mnt == mnt) {
+		peer_mnt = next_peer(mnt);
+		if (peer_mnt == mnt)
+			peer_mnt = NULL;
+	}
+	list_del_init(&mnt->mnt_share);
+
+	if (peer_mnt)
+		master = peer_mnt;
+
+	if (master) {
+		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
+			slave_mnt->mnt_master = master;
+		list_del(&mnt->mnt_slave);
+		list_add(&mnt->mnt_slave, &master->mnt_slave_list);
+		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
+		INIT_LIST_HEAD(&mnt->mnt_slave_list);
+	} else {
+		struct list_head *p = &mnt->mnt_slave_list;
+		while (!list_empty(p)) {
+			slave_mnt = list_entry(p->next,
+					struct vfsmount, mnt_slave);
+			list_del_init(&slave_mnt->mnt_slave);
+			slave_mnt->mnt_master = NULL;
+		}
+	}
+	mnt->mnt_master = master;
+	CLEAR_MNT_SHARED(mnt);
+	INIT_LIST_HEAD(&mnt->mnt_slave_list);
+	return 0;
+}
+
+void change_mnt_propagation(struct vfsmount *mnt, int type)
+{
+	if (type == MS_SHARED) {
+		set_mnt_shared(mnt);
+		return;
+	}
+	do_make_slave(mnt);
+	if (type != MS_SLAVE) {
+		list_del_init(&mnt->mnt_slave);
+		mnt->mnt_master = NULL;
+		if (type == MS_UNBINDABLE)
+			mnt->mnt_flags |= MNT_UNBINDABLE;
+	}
+}
+
+/*
+ * get the next mount in the propagation tree.
+ * @m: the mount seen last
+ * @origin: the original mount from where the tree walk initiated
+ */
+static struct vfsmount *propagation_next(struct vfsmount *m,
+					 struct vfsmount *origin)
+{
+	/* are there any slaves of this mount? */
+	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+		return first_slave(m);
+
+	while (1) {
+		struct vfsmount *next;
+		struct vfsmount *master = m->mnt_master;
+
+		if (master == origin->mnt_master) {
+			next = next_peer(m);
+			return ((next == origin) ? NULL : next);
+		} else if (m->mnt_slave.next != &master->mnt_slave_list)
+			return next_slave(m);
+
+		/* back at master */
+		m = master;
+	}
+}
+
+/*
+ * return the source mount to be used for cloning
+ *
+ * @dest 	the current destination mount
+ * @last_dest  	the last seen destination mount
+ * @last_src  	the last seen source mount
+ * @type	return CL_SLAVE if the new mount has to be
+ * 		cloned as a slave.
+ */
+static struct vfsmount *get_source(struct vfsmount *dest,
+					struct vfsmount *last_dest,
+					struct vfsmount *last_src,
+					int *type)
+{
+	struct vfsmount *p_last_src = NULL;
+	struct vfsmount *p_last_dest = NULL;
+	*type = CL_PROPAGATION;
+
+	if (IS_MNT_SHARED(dest))
+		*type |= CL_MAKE_SHARED;
+
+	while (last_dest != dest->mnt_master) {
+		p_last_dest = last_dest;
+		p_last_src = last_src;
+		last_dest = last_dest->mnt_master;
+		last_src = last_src->mnt_master;
+	}
+
+	if (p_last_dest) {
+		do {
+			p_last_dest = next_peer(p_last_dest);
+		} while (IS_MNT_NEW(p_last_dest));
+	}
+
+	if (dest != p_last_dest) {
+		*type |= CL_SLAVE;
+		return last_src;
+	} else
+		return p_last_src;
+}
+
+/*
+ * mount 'source_mnt' under the destination 'dest_mnt' at
+ * dentry 'dest_dentry'. And propagate that mount to
+ * all the peer and slave mounts of 'dest_mnt'.
+ * Link all the new mounts into a propagation tree headed at
+ * source_mnt. Also link all the new mounts using ->mnt_list
+ * headed at source_mnt's ->mnt_list
+ *
+ * @dest_mnt: destination mount.
+ * @dest_dentry: destination dentry.
+ * @source_mnt: source mount.
+ * @tree_list : list of heads of trees to be attached.
+ */
+int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
+		    struct vfsmount *source_mnt, struct list_head *tree_list)
+{
+	struct vfsmount *m, *child;
+	int ret = 0;
+	struct vfsmount *prev_dest_mnt = dest_mnt;
+	struct vfsmount *prev_src_mnt  = source_mnt;
+	LIST_HEAD(tmp_list);
+	LIST_HEAD(umount_list);
+
+	for (m = propagation_next(dest_mnt, dest_mnt); m;
+			m = propagation_next(m, dest_mnt)) {
+		int type;
+		struct vfsmount *source;
+
+		if (IS_MNT_NEW(m))
+			continue;
+
+		source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
+
+		if (!(child = copy_tree(source, source->mnt_root, type))) {
+			ret = -ENOMEM;
+			list_splice(tree_list, tmp_list.prev);
+			goto out;
+		}
+
+		if (is_subdir(dest_dentry, m->mnt_root)) {
+			mnt_set_mountpoint(m, dest_dentry, child);
+			list_add_tail(&child->mnt_hash, tree_list);
+		} else {
+			/*
+			 * This can happen if the parent mount was bind mounted
+			 * on some subdirectory of a shared/slave mount.
+			 */
+			list_add_tail(&child->mnt_hash, &tmp_list);
+		}
+		prev_dest_mnt = m;
+		prev_src_mnt  = child;
+	}
+out:
+	spin_lock(&vfsmount_lock);
+	while (!list_empty(&tmp_list)) {
+		child = list_entry(tmp_list.next, struct vfsmount, mnt_hash);
+		list_del_init(&child->mnt_hash);
+		umount_tree(child, 0, &umount_list);
+	}
+	spin_unlock(&vfsmount_lock);
+	release_mounts(&umount_list);
+	return ret;
+}
+
+/*
+ * return true if the refcount is greater than count
+ */
+static inline int do_refcount_check(struct vfsmount *mnt, int count)
+{
+	int mycount = atomic_read(&mnt->mnt_count);
+	return (mycount > count);
+}
+
+/*
+ * check if the mount 'mnt' can be unmounted successfully.
+ * @mnt: the mount to be checked for unmount
+ * NOTE: unmounting 'mnt' would naturally propagate to all
+ * other mounts its parent propagates to.
+ * Check if any of these mounts that **do not have submounts**
+ * have more references than 'refcnt'. If so return busy.
+ */
+int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
+{
+	struct vfsmount *m, *child;
+	struct vfsmount *parent = mnt->mnt_parent;
+	int ret = 0;
+
+	if (mnt == parent)
+		return do_refcount_check(mnt, refcnt);
+
+	/*
+	 * quickly check if the current mount can be unmounted.
+	 * If not, we don't have to go checking for all other
+	 * mounts
+	 */
+	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
+		return 1;
+
+	for (m = propagation_next(parent, parent); m;
+	     		m = propagation_next(m, parent)) {
+		child = __lookup_mnt(m, mnt->mnt_mountpoint, 0);
+		if (child && list_empty(&child->mnt_mounts) &&
+		    (ret = do_refcount_check(child, 1)))
+			break;
+	}
+	return ret;
+}
+
+/*
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+static void __propagate_umount(struct vfsmount *mnt)
+{
+	struct vfsmount *parent = mnt->mnt_parent;
+	struct vfsmount *m;
+
+	BUG_ON(parent == mnt);
+
+	for (m = propagation_next(parent, parent); m;
+			m = propagation_next(m, parent)) {
+
+		struct vfsmount *child = __lookup_mnt(m,
+					mnt->mnt_mountpoint, 0);
+		/*
+		 * umount the child only if the child has no
+		 * other children
+		 */
+		if (child && list_empty(&child->mnt_mounts)) {
+			list_del(&child->mnt_hash);
+			list_add_tail(&child->mnt_hash, &mnt->mnt_hash);
+		}
+	}
+}
+
+/*
+ * collect all mounts that receive propagation from the mount in @list,
+ * and return these additional mounts in the same list.
+ * @list: the list of mounts to be unmounted.
+ */
+int propagate_umount(struct list_head *list)
+{
+	struct vfsmount *mnt;
+
+	list_for_each_entry(mnt, list, mnt_hash)
+		__propagate_umount(mnt);
+	return 0;
+}
diff --git a/fs/pnode.h b/fs/pnode.h
new file mode 100644
index 0000000..020e1bb
--- /dev/null
+++ b/fs/pnode.h
@@ -0,0 +1,37 @@
+/*
+ *  linux/fs/pnode.h
+ *
+ * (C) Copyright IBM Corporation 2005.
+ *	Released under GPL v2.
+ *
+ */
+#ifndef _LINUX_PNODE_H
+#define _LINUX_PNODE_H
+
+#include <linux/list.h>
+#include <linux/mount.h>
+
+#define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED)
+#define IS_MNT_SLAVE(mnt) (mnt->mnt_master)
+#define IS_MNT_NEW(mnt)  (!mnt->mnt_namespace)
+#define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED)
+#define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE)
+
+#define CL_EXPIRE    		0x01
+#define CL_SLAVE     		0x02
+#define CL_COPY_ALL 		0x04
+#define CL_MAKE_SHARED 		0x08
+#define CL_PROPAGATION 		0x10
+
+static inline void set_mnt_shared(struct vfsmount *mnt)
+{
+	mnt->mnt_flags &= ~MNT_PNODE_MASK;
+	mnt->mnt_flags |= MNT_SHARED;
+}
+
+void change_mnt_propagation(struct vfsmount *, int);
+int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
+		struct list_head *);
+int propagate_umount(struct list_head *);
+int propagate_mount_busy(struct vfsmount *, int);
+#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a170450..634355e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -70,6 +70,7 @@
 #include <linux/seccomp.h>
 #include <linux/cpuset.h>
 #include <linux/audit.h>
+#include <linux/poll.h>
 #include "internal.h"
 
 /*
@@ -660,26 +661,38 @@
 #endif
 
 extern struct seq_operations mounts_op;
+struct proc_mounts {
+	struct seq_file m;
+	int event;
+};
+
 static int mounts_open(struct inode *inode, struct file *file)
 {
 	struct task_struct *task = proc_task(inode);
-	int ret = seq_open(file, &mounts_op);
+	struct namespace *namespace;
+	struct proc_mounts *p;
+	int ret = -EINVAL;
 
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		struct namespace *namespace;
-		task_lock(task);
-		namespace = task->namespace;
-		if (namespace)
-			get_namespace(namespace);
-		task_unlock(task);
+	task_lock(task);
+	namespace = task->namespace;
+	if (namespace)
+		get_namespace(namespace);
+	task_unlock(task);
 
-		if (namespace)
-			m->private = namespace;
-		else {
-			seq_release(inode, file);
-			ret = -EINVAL;
+	if (namespace) {
+		ret = -ENOMEM;
+		p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+		if (p) {
+			file->private_data = &p->m;
+			ret = seq_open(file, &mounts_op);
+			if (!ret) {
+				p->m.private = namespace;
+				p->event = namespace->event;
+				return 0;
+			}
+			kfree(p);
 		}
+		put_namespace(namespace);
 	}
 	return ret;
 }
@@ -692,11 +705,30 @@
 	return seq_release(inode, file);
 }
 
+static unsigned mounts_poll(struct file *file, poll_table *wait)
+{
+	struct proc_mounts *p = file->private_data;
+	struct namespace *ns = p->m.private;
+	unsigned res = 0;
+
+	poll_wait(file, &ns->poll, wait);
+
+	spin_lock(&vfsmount_lock);
+	if (p->event != ns->event) {
+		p->event = ns->event;
+		res = POLLERR;
+	}
+	spin_unlock(&vfsmount_lock);
+
+	return res;
+}
+
 static struct file_operations proc_mounts_operations = {
 	.open		= mounts_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= mounts_release,
+	.poll		= mounts_poll,
 };
 
 #define PROC_BLOCK_SIZE	(3*1024)		/* 4K page size but our output routines use some slack for overruns */
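
With the per-namespace event counter and wait queue above, userspace can
now block on /proc/mounts and re-read it whenever the mount table changes.
A rough sketch of a consumer (the path and the re-read-everything strategy
are assumptions, not part of the patch):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd = open("/proc/mounts", O_RDONLY);

		if (fd < 0)
			return 1;
		for (;;) {
			struct pollfd pfd = { .fd = fd, .events = 0 };

			/* consume the current table so the next change is fresh */
			lseek(fd, 0, SEEK_SET);
			while (read(fd, buf, sizeof(buf)) > 0)
				;
			/* POLLERR is reported even when no events are requested */
			if (poll(&pfd, 1, -1) < 0)
				break;
			if (pfd.revents & POLLERR)
				printf("mount table changed\n");
		}
		close(fd);
		return 0;
	}
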
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 38ef913..7c40570 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -28,13 +28,17 @@
  */
 int seq_open(struct file *file, struct seq_operations *op)
 {
-	struct seq_file *p = kmalloc(sizeof(*p), GFP_KERNEL);
-	if (!p)
-		return -ENOMEM;
+	struct seq_file *p = file->private_data;
+
+	if (!p) {
+		p = kmalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+		file->private_data = p;
+	}
 	memset(p, 0, sizeof(*p));
 	sema_init(&p->sem, 1);
 	p->op = op;
-	file->private_data = p;
 
 	/*
 	 * Wrappers around seq_open(e.g. swaps_open) need to be
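
The relaxed seq_open() above lets a wrapper embed the seq_file inside a
larger per-open structure and hand it in via file->private_data, which is
exactly what mounts_open() now does.  A sketch with illustrative names
(example_seq_ops is assumed to be defined elsewhere with the usual
start/next/stop/show methods):

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	extern struct seq_operations example_seq_ops;

	struct example_state {
		struct seq_file m;	/* first member, so seq_release()'s kfree frees it all */
		int event;		/* extra per-open state */
	};

	static int example_open(struct inode *inode, struct file *file)
	{
		struct example_state *p = kmalloc(sizeof(*p), GFP_KERNEL);
		int ret;

		if (!p)
			return -ENOMEM;
		/* let seq_open() initialize the embedded seq_file in place */
		file->private_data = &p->m;
		ret = seq_open(file, &example_seq_ops);
		if (ret) {
			kfree(p);
			file->private_data = NULL;
			return ret;
		}
		p->event = 0;	/* seq_open() zeroed only the seq_file part */
		return 0;
	}
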
diff --git a/fs/super.c b/fs/super.c
index eed6c31..6689dde 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -171,6 +171,7 @@
 	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
 		s->s_count -= S_BIAS-1;
 		spin_unlock(&sb_lock);
+		DQUOT_OFF(s);
 		down_write(&s->s_umount);
 		fs->kill_sb(s);
 		put_filesystem(fs);
diff --git a/include/asm-arm/arch-realview/entry-macro.S b/include/asm-arm/arch-realview/entry-macro.S
index 2712ba7..4df469b 100644
--- a/include/asm-arm/arch-realview/entry-macro.S
+++ b/include/asm-arm/arch-realview/entry-macro.S
@@ -47,3 +47,17 @@
 		cmpcs	\irqnr, \irqnr
 
 		.endm
+
+		/* We assume that irqstat (the raw value of the IRQ acknowledge
+		 * register) is preserved from the macro above.
+		 * If there is an IPI, we immediately signal end of interrupt on the
+		 * controller, since this requires the original irqstat value which
+		 * we won't easily be able to recreate later.
+		 */
+
+		.macro test_for_ipi, irqnr, irqstat, base, tmp
+		bic	\irqnr, \irqstat, #0x1c00
+		cmp	\irqnr, #16
+		strcc	\irqstat, [\base, #GIC_CPU_EOI]
+		cmpcs	\irqnr, \irqnr
+		.endm
diff --git a/include/asm-arm/arch-realview/platform.h b/include/asm-arm/arch-realview/platform.h
index 4b6de13..aef9b36 100644
--- a/include/asm-arm/arch-realview/platform.h
+++ b/include/asm-arm/arch-realview/platform.h
@@ -203,8 +203,14 @@
 	/* Reserved 0x1001A000 - 0x1001FFFF */
 #define REALVIEW_CLCD_BASE            0x10020000	/* CLCD */
 #define REALVIEW_DMAC_BASE            0x10030000	/* DMA controller */
+#ifndef CONFIG_REALVIEW_MPCORE
 #define REALVIEW_GIC_CPU_BASE         0x10040000	/* Generic interrupt controller CPU interface */
 #define REALVIEW_GIC_DIST_BASE        0x10041000	/* Generic interrupt controller distributor */
+#else
+#define REALVIEW_MPCORE_SCU_BASE	0x10100000	/*  SCU registers */
+#define REALVIEW_GIC_CPU_BASE		0x10100100	/* Generic interrupt controller CPU interface */
+#define REALVIEW_GIC_DIST_BASE		0x10101000	/* Generic interrupt controller distributor */
+#endif
 #define REALVIEW_SMC_BASE             0x10080000	/* SMC */
 	/* Reserved 0x10090000 - 0x100EFFFF */
 
@@ -265,6 +271,7 @@
  *  Interrupts - bit assignment (primary)
  * ------------------------------------------------------------------------
  */
+#ifndef CONFIG_REALVIEW_MPCORE
 #define INT_WDOGINT			0	/* Watchdog timer */
 #define INT_SOFTINT			1	/* Software interrupt */
 #define INT_COMMRx			2	/* Debug Comm Rx interrupt */
@@ -297,6 +304,55 @@
 #define INT_USB				29	/* USB controller */
 #define INT_TSPENINT			30	/* Touchscreen pen */
 #define INT_TSKPADINT			31	/* Touchscreen keypad */
+#else
+#define INT_LOCALTIMER			29
+#define INT_LOCALWDOG			30
+
+#define INT_AACI			0
+#define INT_TIMERINT0_1			1
+#define INT_TIMERINT2_3			2
+#define INT_USB				3
+#define INT_UARTINT0			4
+#define INT_UARTINT1			5
+#define INT_RTCINT			6
+#define INT_KMI0			7
+#define INT_KMI1			8
+#define INT_ETH				9
+#define INT_EB_IRQ1			10	/* main GIC */
+#define INT_EB_IRQ2			11	/* tile GIC */
+#define INT_EB_FIQ1			12	/* main GIC */
+#define INT_EB_FIQ2			13	/* tile GIC */
+#define INT_MMCI0A			14
+#define INT_MMCI0B			15
+
+#define INT_PMU_CPU0			17
+#define INT_PMU_CPU1			18
+#define INT_PMU_CPU2			19
+#define INT_PMU_CPU3			20
+#define INT_PMU_SCU0			21
+#define INT_PMU_SCU1			22
+#define INT_PMU_SCU2			23
+#define INT_PMU_SCU3			24
+#define INT_PMU_SCU4			25
+#define INT_PMU_SCU5			26
+#define INT_PMU_SCU6			27
+#define INT_PMU_SCU7			28
+
+#define INT_L220_EVENT			29
+#define INT_L220_SLAVE			30
+#define INT_L220_DECODE			31
+
+#define INT_UARTINT2			-1
+#define INT_UARTINT3			-1
+#define INT_CLCDINT			-1
+#define INT_DMAINT			-1
+#define INT_WDOGINT			-1
+#define INT_GPIOINT0			-1
+#define INT_GPIOINT1			-1
+#define INT_GPIOINT2			-1
+#define INT_SCIINT			-1
+#define INT_SSPINT			-1
+#endif
 
 /* 
  *  Interrupt bit positions
diff --git a/include/asm-arm/arch-realview/smp.h b/include/asm-arm/arch-realview/smp.h
new file mode 100644
index 0000000..fc87783
--- /dev/null
+++ b/include/asm-arm/arch-realview/smp.h
@@ -0,0 +1,31 @@
+#ifndef ASMARM_ARCH_SMP_H
+#define ASMARM_ARCH_SMP_H
+
+#include <linux/config.h>
+
+#include <asm/hardware/gic.h>
+
+#define hard_smp_processor_id()			\
+	({						\
+		unsigned int cpunum;			\
+		__asm__("mrc p15, 0, %0, c0, c0, 5"	\
+			: "=r" (cpunum));		\
+		cpunum &= 0x0F;				\
+	})
+
+/*
+ * We use IRQ1 as the IPI
+ */
+static inline void smp_cross_call(cpumask_t callmap)
+{
+	gic_raise_softirq(callmap, 1);
+}
+
+/*
+ * Do nothing on MPcore.
+ */
+static inline void smp_cross_call_done(cpumask_t callmap)
+{
+}
+
+#endif
diff --git a/include/asm-arm/hardware/arm_scu.h b/include/asm-arm/hardware/arm_scu.h
new file mode 100644
index 0000000..9903f60
--- /dev/null
+++ b/include/asm-arm/hardware/arm_scu.h
@@ -0,0 +1,13 @@
+#ifndef ASMARM_HARDWARE_ARM_SCU_H
+#define ASMARM_HARDWARE_ARM_SCU_H
+
+/*
+ * SCU registers
+ */
+#define SCU_CTRL		0x00
+#define SCU_CONFIG		0x04
+#define SCU_CPU_STATUS		0x08
+#define SCU_INVALIDATE		0x0c
+#define SCU_FPGA_REVISION	0x10
+
+#endif
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index ef436b9..9d41548 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -120,6 +120,7 @@
 
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 typedef union ia64_va {
 	struct {
diff --git a/include/asm-m68k/kbio.h b/include/asm-m68k/kbio.h
deleted file mode 100644
index e1fbf8f..0000000
--- a/include/asm-m68k/kbio.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-sparc/kbio.h>
diff --git a/include/asm-m68k/vuid_event.h b/include/asm-m68k/vuid_event.h
deleted file mode 100644
index 52ecb52..0000000
--- a/include/asm-m68k/vuid_event.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _M68K_VUID_EVENT_H
-#define _M68K_VUID_EVENT_H
-#include <asm-sparc/vuid_event.h>
-#endif
diff --git a/include/asm-sparc/audioio.h b/include/asm-sparc/audioio.h
deleted file mode 100644
index cf16173..0000000
--- a/include/asm-sparc/audioio.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * include/asm-sparc/audioio.h
- *
- * Sparc Audio Midlayer
- * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
- */
-
-#ifndef _AUDIOIO_H_
-#define _AUDIOIO_H_
-
-/*
- *	SunOS/Solaris /dev/audio interface
- */
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/ioctl.h>
-#endif
-
-/*
- * This structure contains state information for audio device IO streams.
- */
-typedef struct audio_prinfo {
-	/*
-	 * The following values describe the audio data encoding.
-	 */
-	unsigned int sample_rate;	/* samples per second */
-	unsigned int channels;	/* number of interleaved channels */
-	unsigned int precision;	/* bit-width of each sample */
-	unsigned int encoding;	/* data encoding method */
-
-	/*
-	 * The following values control audio device configuration
-	 */
-	unsigned int gain;		/* gain level: 0 - 255 */
-	unsigned int port;		/* selected I/O port (see below) */
-	unsigned int avail_ports;	/* available I/O ports (see below) */
-	unsigned int _xxx[2];		/* Reserved for future use */
-
-	unsigned int buffer_size;	/* I/O buffer size */
-
-	/*
-	 * The following values describe driver state
-	 */
-	unsigned int samples;		/* number of samples converted */
-	unsigned int eof;		/* End Of File counter (play only) */
-
-	unsigned char	pause;		/* non-zero for pause, zero to resume */
-	unsigned char	error;		/* non-zero if overflow/underflow */
-	unsigned char	waiting;	/* non-zero if a process wants access */
-	unsigned char balance;	/* stereo channel balance */
-
-	unsigned short minordev;
-
-	/*
-	 * The following values are read-only state flags
-	 */
-	unsigned char open;		/* non-zero if open access permitted */
-	unsigned char active;		/* non-zero if I/O is active */
-} audio_prinfo_t;
-
-
-/*
- * This structure describes the current state of the audio device.
- */
-typedef struct audio_info {
-	/*
-	 * Per-stream information
-	 */
-	audio_prinfo_t play;	/* output status information */
-	audio_prinfo_t record;	/* input status information */
-
-	/*
-	 * Per-unit/channel information
-	 */
-	unsigned int monitor_gain;	/* input to output mix: 0 - 255 */
-	unsigned char output_muted;	/* non-zero if output is muted */
-	unsigned char _xxx[3];	/* Reserved for future use */
-	unsigned int _yyy[3];		/* Reserved for future use */
-} audio_info_t;
-
-
-/*
- * Audio encoding types
- */
-#define	AUDIO_ENCODING_NONE	(0)	/* no encoding assigned	  */
-#define	AUDIO_ENCODING_ULAW	(1)	/* u-law encoding	  */
-#define	AUDIO_ENCODING_ALAW	(2)	/* A-law encoding	  */
-#define	AUDIO_ENCODING_LINEAR	(3)	/* Linear PCM encoding	  */
-#define AUDIO_ENCODING_FLOAT    (4)     /* IEEE float (-1. <-> +1.) */
-#define	AUDIO_ENCODING_DVI	(104)	/* DVI ADPCM		  */
-#define	AUDIO_ENCODING_LINEAR8	(105)	/* 8 bit UNSIGNED	  */
-#define	AUDIO_ENCODING_LINEARLE	(106)	/* Linear PCM LE encoding */
-
-/*
- * These ranges apply to record, play, and monitor gain values
- */
-#define	AUDIO_MIN_GAIN	(0)	/* minimum gain value */
-#define	AUDIO_MAX_GAIN	(255)	/* maximum gain value */
-
-/*
- * These values apply to the balance field to adjust channel gain values
- */
-#define	AUDIO_LEFT_BALANCE	(0)	/* left channel only	*/
-#define	AUDIO_MID_BALANCE	(32)	/* equal left/right channel */
-#define	AUDIO_RIGHT_BALANCE	(64)	/* right channel only	*/
-#define	AUDIO_BALANCE_SHIFT	(3)
-
-/*
- * Generic minimum/maximum limits for number of channels, both modes
- */
-#define	AUDIO_MIN_PLAY_CHANNELS	(1)
-#define	AUDIO_MAX_PLAY_CHANNELS	(4)
-#define	AUDIO_MIN_REC_CHANNELS	(1)
-#define	AUDIO_MAX_REC_CHANNELS	(4)
-
-/*
- * Generic minimum/maximum limits for sample precision
- */
-#define	AUDIO_MIN_PLAY_PRECISION	(8)
-#define	AUDIO_MAX_PLAY_PRECISION	(32)
-#define	AUDIO_MIN_REC_PRECISION		(8)
-#define	AUDIO_MAX_REC_PRECISION		(32)
-
-/*
- * Define some convenient names for typical audio ports
- */
-/*
- * output ports (several may be enabled simultaneously)
- */
-#define	AUDIO_SPEAKER		0x01	/* output to built-in speaker */
-#define	AUDIO_HEADPHONE		0x02	/* output to headphone jack */
-#define	AUDIO_LINE_OUT		0x04	/* output to line out	 */
-
-/*
- * input ports (usually only one at a time)
- */
-#define	AUDIO_MICROPHONE	0x01	/* input from microphone */
-#define	AUDIO_LINE_IN		0x02	/* input from line in	 */
-#define	AUDIO_CD		0x04	/* input from on-board CD inputs */
-#define	AUDIO_INTERNAL_CD_IN	AUDIO_CD	/* input from internal CDROM */
-#define AUDIO_ANALOG_LOOPBACK   0x40    /* input from output */
-
-
-/*
- * This macro initializes an audio_info structure to 'harmless' values.
- * Note that (~0) might not be a harmless value for a flag that was
- * a signed int.
- */
-#define	AUDIO_INITINFO(i)	{					\
-	unsigned int	*__x__;						\
-	for (__x__ = (unsigned int *)(i);				\
-	    (char *) __x__ < (((char *)(i)) + sizeof (audio_info_t));	\
-	    *__x__++ = ~0);						\
-}
-
-/*
- * These allow testing for what the user wants to set 
- */
-#define AUD_INITVALUE   (~0)
-#define Modify(X)       ((unsigned int)(X) != AUD_INITVALUE)
-#define Modifys(X)      ((X) != (unsigned short)AUD_INITVALUE)
-#define Modifyc(X)      ((X) != (unsigned char)AUD_INITVALUE)
-
-/*
- * Parameter for the AUDIO_GETDEV ioctl to determine current
- * audio devices.
- */
-#define	MAX_AUDIO_DEV_LEN	(16)
-typedef struct audio_device {
-	char name[MAX_AUDIO_DEV_LEN];
-	char version[MAX_AUDIO_DEV_LEN];
-	char config[MAX_AUDIO_DEV_LEN];
-} audio_device_t;
-
-
-/*
- * Ioctl calls for the audio device.
- */
-
-/*
- * AUDIO_GETINFO retrieves the current state of the audio device.
- *
- * AUDIO_SETINFO copies all fields of the audio_info structure whose
- * values are not set to the initialized value (-1) to the device state.
- * It performs an implicit AUDIO_GETINFO to return the new state of the
- * device.  Note that the record.samples and play.samples fields are set
- * to the last value before the AUDIO_SETINFO took effect.  This allows
- * an application to reset the counters while atomically retrieving the
- * last value.
- *
- * AUDIO_DRAIN suspends the calling process until the write buffers are
- * empty.
- *
- * AUDIO_GETDEV returns a structure of type audio_device_t which contains
- * three strings.  The string "name" is a short identifying string (for
- * example, the SBus Fcode name string), the string "version" identifies
- * the current version of the device, and the "config" string identifies
- * the specific configuration of the audio stream.  All fields are
- * device-dependent -- see the device specific manual pages for details.
- *
- * AUDIO_GETDEV_SUNOS returns a number which is an audio device defined 
- * herein (making it not too portable)
- *
- * AUDIO_FLUSH stops all playback and recording, clears all queued buffers, 
- * resets error counters, and restarts recording and playback as appropriate
- * for the current sampling mode.
- */
-#define	AUDIO_GETINFO	_IOR('A', 1, audio_info_t)
-#define	AUDIO_SETINFO	_IOWR('A', 2, audio_info_t)
-#define	AUDIO_DRAIN	_IO('A', 3)
-#define	AUDIO_GETDEV	_IOR('A', 4, audio_device_t)
-#define	AUDIO_GETDEV_SUNOS	_IOR('A', 4, int)
-#define AUDIO_FLUSH     _IO('A', 5)
-
-/* Define possible audio hardware configurations for 
- * old SunOS-style AUDIO_GETDEV ioctl */
-#define AUDIO_DEV_UNKNOWN       (0)     /* not defined */
-#define AUDIO_DEV_AMD           (1)     /* audioamd device */
-#define AUDIO_DEV_SPEAKERBOX    (2)     /* dbri device with speakerbox */
-#define AUDIO_DEV_CODEC         (3)     /* dbri device (internal speaker) */
-#define AUDIO_DEV_CS4231        (5)     /* cs4231 device */
-
-/*
- * The following ioctl sets the audio device into an internal loopback mode,
- * if the hardware supports this.  The argument is TRUE to set loopback,
- * FALSE to reset to normal operation.  If the hardware does not support
- * internal loopback, the ioctl should fail with EINVAL.
- * Causes ADC data to be digitally mixed in and sent to the DAC.
- */
-#define	AUDIO_DIAG_LOOPBACK	_IOW('A', 101, int)
-
-#endif /* _AUDIOIO_H_ */
diff --git a/include/asm-sparc/kbio.h b/include/asm-sparc/kbio.h
deleted file mode 100644
index 3cf496b..0000000
--- a/include/asm-sparc/kbio.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __LINUX_KBIO_H
-#define __LINUX_KBIO_H
-
-/* Return keyboard type */
-#define KIOCTYPE    _IOR('k', 9, int)
-/* Return Keyboard layout */
-#define KIOCLAYOUT  _IOR('k', 20, int)
-
-enum {
-    TR_NONE,
-    TR_ASCII,			/* keyboard is in regular state */
-    TR_EVENT,			/* keystrokes sent as firm events */
-    TR_UNTRANS_EVENT		/* EVENT+up and down+no translation */
-};
-
-/* Return the current keyboard translation */
-#define KIOCGTRANS  _IOR('k', 5, int)
-/* Set the keyboard translation */
-#define KIOCTRANS   _IOW('k', 0, int)
-
-/* Send a keyboard command */
-#define KIOCCMD     _IOW('k', 8, int)
-
-/* Return if keystrokes are being sent to /dev/kbd */
-
-/* Set routing of keystrokes to /dev/kbd */
-#define KIOCSDIRECT _IOW('k', 10, int)
-
-/* Set keyboard leds */
-#define KIOCSLED    _IOW('k', 14, unsigned char)
-
-/* Get keyboard leds */
-#define KIOCGLED    _IOR('k', 15, unsigned char)
-
-/* Used by KIOC[GS]RATE */
-struct kbd_rate {
-	unsigned char delay;	/* Delay in Hz before first repeat.	*/
-	unsigned char rate;	/* In characters per second (0..50).	*/
-};
-
-/* Set keyboard rate */
-#define KIOCSRATE   _IOW('k', 40, struct kbd_rate)
-
-/* Get keyboard rate */
-#define KIOCGRATE   _IOW('k', 41, struct kbd_rate)
-
-/* Top bit records if the key is up or down */
-#define KBD_UP      0x80
-
-/* Usable information */
-#define KBD_KEYMASK 0x7f
-
-/* All keys up */
-#define KBD_IDLE    0x75
-
-#endif /* __LINUX_KBIO_H */
diff --git a/include/asm-sparc/termios.h b/include/asm-sparc/termios.h
index 0a8ad4c..d05f83c 100644
--- a/include/asm-sparc/termios.h
+++ b/include/asm-sparc/termios.h
@@ -38,15 +38,6 @@
 	int st_columns; /* Columns on the terminal */
 };
 
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
 struct winsize {
 	unsigned short ws_row;
 	unsigned short ws_col;
diff --git a/include/asm-sparc/vuid_event.h b/include/asm-sparc/vuid_event.h
deleted file mode 100644
index 7781e9f2..0000000
--- a/include/asm-sparc/vuid_event.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SunOS Virtual User Input Device (VUID) compatibility */
-
-
-typedef struct firm_event {
-	unsigned short id;	  /* tag for this event */
-	unsigned char  pair_type; /* unused by X11 */
-        unsigned char  pair;	  /* unused by X11 */
-        int            value;	  /* VKEY_UP, VKEY_DOWN or delta */
-        struct timeval time;
-} Firm_event;
-
-enum {
-    FE_PAIR_NONE,
-    FE_PAIR_SET,
-    FE_PAIR_DELTA,
-    FE_PAIR_ABSOLUTE
-};
-
-/* VUID stream formats */
-#define VUID_NATIVE     0	/* Native byte stream format */
-#define VUID_FIRM_EVENT 1	/* send firm_event structures */
-
-/* ioctls */
-    /* Set input device byte stream format (any of VUID_{NATIVE,FIRM_EVENT}) */
-#define VUIDSFORMAT   _IOW('v', 1, int)
-    /* Retrieve input device byte stream format */
-#define VUIDGFORMAT   _IOR('v', 2, int)
-
-/* Possible tag values */
-/*    mouse buttons: */
-#define MS_LEFT         0x7f20
-#define MS_MIDDLE       0x7f21
-#define MS_RIGHT        0x7f22
-/*    motion: */
-#define LOC_X_DELTA     0x7f80
-#define LOC_Y_DELTA     0x7f81
-#define LOC_X_ABSOLUTE  0x7f82  /* X compat, unsupported */
-#define LOC_Y_ABSOLUTE  0x7f83  /* X compat, unsupported */
-
-#define VKEY_UP   0
-#define VKEY_DOWN 1
diff --git a/include/asm-sparc64/audioio.h b/include/asm-sparc64/audioio.h
deleted file mode 100644
index cf16173..0000000
--- a/include/asm-sparc64/audioio.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * include/asm-sparc/audioio.h
- *
- * Sparc Audio Midlayer
- * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
- */
-
-#ifndef _AUDIOIO_H_
-#define _AUDIOIO_H_
-
-/*
- *	SunOS/Solaris /dev/audio interface
- */
-
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/ioctl.h>
-#endif
-
-/*
- * This structure contains state information for audio device IO streams.
- */
-typedef struct audio_prinfo {
-	/*
-	 * The following values describe the audio data encoding.
-	 */
-	unsigned int sample_rate;	/* samples per second */
-	unsigned int channels;	/* number of interleaved channels */
-	unsigned int precision;	/* bit-width of each sample */
-	unsigned int encoding;	/* data encoding method */
-
-	/*
-	 * The following values control audio device configuration
-	 */
-	unsigned int gain;		/* gain level: 0 - 255 */
-	unsigned int port;		/* selected I/O port (see below) */
-	unsigned int avail_ports;	/* available I/O ports (see below) */
-	unsigned int _xxx[2];		/* Reserved for future use */
-
-	unsigned int buffer_size;	/* I/O buffer size */
-
-	/*
-	 * The following values describe driver state
-	 */
-	unsigned int samples;		/* number of samples converted */
-	unsigned int eof;		/* End Of File counter (play only) */
-
-	unsigned char	pause;		/* non-zero for pause, zero to resume */
-	unsigned char	error;		/* non-zero if overflow/underflow */
-	unsigned char	waiting;	/* non-zero if a process wants access */
-	unsigned char balance;	/* stereo channel balance */
-
-	unsigned short minordev;
-
-	/*
-	 * The following values are read-only state flags
-	 */
-	unsigned char open;		/* non-zero if open access permitted */
-	unsigned char active;		/* non-zero if I/O is active */
-} audio_prinfo_t;
-
-
-/*
- * This structure describes the current state of the audio device.
- */
-typedef struct audio_info {
-	/*
-	 * Per-stream information
-	 */
-	audio_prinfo_t play;	/* output status information */
-	audio_prinfo_t record;	/* input status information */
-
-	/*
-	 * Per-unit/channel information
-	 */
-	unsigned int monitor_gain;	/* input to output mix: 0 - 255 */
-	unsigned char output_muted;	/* non-zero if output is muted */
-	unsigned char _xxx[3];	/* Reserved for future use */
-	unsigned int _yyy[3];		/* Reserved for future use */
-} audio_info_t;
-
-
-/*
- * Audio encoding types
- */
-#define	AUDIO_ENCODING_NONE	(0)	/* no encoding assigned	  */
-#define	AUDIO_ENCODING_ULAW	(1)	/* u-law encoding	  */
-#define	AUDIO_ENCODING_ALAW	(2)	/* A-law encoding	  */
-#define	AUDIO_ENCODING_LINEAR	(3)	/* Linear PCM encoding	  */
-#define AUDIO_ENCODING_FLOAT    (4)     /* IEEE float (-1. <-> +1.) */
-#define	AUDIO_ENCODING_DVI	(104)	/* DVI ADPCM		  */
-#define	AUDIO_ENCODING_LINEAR8	(105)	/* 8 bit UNSIGNED	  */
-#define	AUDIO_ENCODING_LINEARLE	(106)	/* Linear PCM LE encoding */
-
-/*
- * These ranges apply to record, play, and monitor gain values
- */
-#define	AUDIO_MIN_GAIN	(0)	/* minimum gain value */
-#define	AUDIO_MAX_GAIN	(255)	/* maximum gain value */
-
-/*
- * These values apply to the balance field to adjust channel gain values
- */
-#define	AUDIO_LEFT_BALANCE	(0)	/* left channel only	*/
-#define	AUDIO_MID_BALANCE	(32)	/* equal left/right channel */
-#define	AUDIO_RIGHT_BALANCE	(64)	/* right channel only	*/
-#define	AUDIO_BALANCE_SHIFT	(3)
-
-/*
- * Generic minimum/maximum limits for number of channels, both modes
- */
-#define	AUDIO_MIN_PLAY_CHANNELS	(1)
-#define	AUDIO_MAX_PLAY_CHANNELS	(4)
-#define	AUDIO_MIN_REC_CHANNELS	(1)
-#define	AUDIO_MAX_REC_CHANNELS	(4)
-
-/*
- * Generic minimum/maximum limits for sample precision
- */
-#define	AUDIO_MIN_PLAY_PRECISION	(8)
-#define	AUDIO_MAX_PLAY_PRECISION	(32)
-#define	AUDIO_MIN_REC_PRECISION		(8)
-#define	AUDIO_MAX_REC_PRECISION		(32)
-
-/*
- * Define some convenient names for typical audio ports
- */
-/*
- * output ports (several may be enabled simultaneously)
- */
-#define	AUDIO_SPEAKER		0x01	/* output to built-in speaker */
-#define	AUDIO_HEADPHONE		0x02	/* output to headphone jack */
-#define	AUDIO_LINE_OUT		0x04	/* output to line out	 */
-
-/*
- * input ports (usually only one at a time)
- */
-#define	AUDIO_MICROPHONE	0x01	/* input from microphone */
-#define	AUDIO_LINE_IN		0x02	/* input from line in	 */
-#define	AUDIO_CD		0x04	/* input from on-board CD inputs */
-#define	AUDIO_INTERNAL_CD_IN	AUDIO_CD	/* input from internal CDROM */
-#define AUDIO_ANALOG_LOOPBACK   0x40    /* input from output */
-
-
-/*
- * This macro initializes an audio_info structure to 'harmless' values.
- * Note that (~0) might not be a harmless value for a flag that was
- * a signed int.
- */
-#define	AUDIO_INITINFO(i)	{					\
-	unsigned int	*__x__;						\
-	for (__x__ = (unsigned int *)(i);				\
-	    (char *) __x__ < (((char *)(i)) + sizeof (audio_info_t));	\
-	    *__x__++ = ~0);						\
-}
-
-/*
- * These allow testing for what the user wants to set 
- */
-#define AUD_INITVALUE   (~0)
-#define Modify(X)       ((unsigned int)(X) != AUD_INITVALUE)
-#define Modifys(X)      ((X) != (unsigned short)AUD_INITVALUE)
-#define Modifyc(X)      ((X) != (unsigned char)AUD_INITVALUE)
-
-/*
- * Parameter for the AUDIO_GETDEV ioctl to determine current
- * audio devices.
- */
-#define	MAX_AUDIO_DEV_LEN	(16)
-typedef struct audio_device {
-	char name[MAX_AUDIO_DEV_LEN];
-	char version[MAX_AUDIO_DEV_LEN];
-	char config[MAX_AUDIO_DEV_LEN];
-} audio_device_t;
-
-
-/*
- * Ioctl calls for the audio device.
- */
-
-/*
- * AUDIO_GETINFO retrieves the current state of the audio device.
- *
- * AUDIO_SETINFO copies all fields of the audio_info structure whose
- * values are not set to the initialized value (-1) to the device state.
- * It performs an implicit AUDIO_GETINFO to return the new state of the
- * device.  Note that the record.samples and play.samples fields are set
- * to the last value before the AUDIO_SETINFO took effect.  This allows
- * an application to reset the counters while atomically retrieving the
- * last value.
- *
- * AUDIO_DRAIN suspends the calling process until the write buffers are
- * empty.
- *
- * AUDIO_GETDEV returns a structure of type audio_device_t which contains
- * three strings.  The string "name" is a short identifying string (for
- * example, the SBus Fcode name string), the string "version" identifies
- * the current version of the device, and the "config" string identifies
- * the specific configuration of the audio stream.  All fields are
- * device-dependent -- see the device specific manual pages for details.
- *
- * AUDIO_GETDEV_SUNOS returns a number which is an audio device defined 
- * herein (making it not too portable)
- *
- * AUDIO_FLUSH stops all playback and recording, clears all queued buffers, 
- * resets error counters, and restarts recording and playback as appropriate
- * for the current sampling mode.
- */
-#define	AUDIO_GETINFO	_IOR('A', 1, audio_info_t)
-#define	AUDIO_SETINFO	_IOWR('A', 2, audio_info_t)
-#define	AUDIO_DRAIN	_IO('A', 3)
-#define	AUDIO_GETDEV	_IOR('A', 4, audio_device_t)
-#define	AUDIO_GETDEV_SUNOS	_IOR('A', 4, int)
-#define AUDIO_FLUSH     _IO('A', 5)
-
-/* Define possible audio hardware configurations for 
- * old SunOS-style AUDIO_GETDEV ioctl */
-#define AUDIO_DEV_UNKNOWN       (0)     /* not defined */
-#define AUDIO_DEV_AMD           (1)     /* audioamd device */
-#define AUDIO_DEV_SPEAKERBOX    (2)     /* dbri device with speakerbox */
-#define AUDIO_DEV_CODEC         (3)     /* dbri device (internal speaker) */
-#define AUDIO_DEV_CS4231        (5)     /* cs4231 device */
-
-/*
- * The following ioctl sets the audio device into an internal loopback mode,
- * if the hardware supports this.  The argument is TRUE to set loopback,
- * FALSE to reset to normal operation.  If the hardware does not support
- * internal loopback, the ioctl should fail with EINVAL.
- * Causes ADC data to be digitally mixed in and sent to the DAC.
- */
-#define	AUDIO_DIAG_LOOPBACK	_IOW('A', 101, int)
-
-#endif /* _AUDIOIO_H_ */
diff --git a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h
index 543e4e5..7a408a0 100644
--- a/include/asm-sparc64/ebus.h
+++ b/include/asm-sparc64/ebus.h
@@ -79,6 +79,7 @@
 			    size_t len);
 extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
 extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
+extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
 extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
 
 extern struct linux_ebus		*ebus_chain;
diff --git a/include/asm-sparc64/kbio.h b/include/asm-sparc64/kbio.h
deleted file mode 100644
index 3cf496b..0000000
--- a/include/asm-sparc64/kbio.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef __LINUX_KBIO_H
-#define __LINUX_KBIO_H
-
-/* Return keyboard type */
-#define KIOCTYPE    _IOR('k', 9, int)
-/* Return Keyboard layout */
-#define KIOCLAYOUT  _IOR('k', 20, int)
-
-enum {
-    TR_NONE,
-    TR_ASCII,			/* keyboard is in regular state */
-    TR_EVENT,			/* keystrokes sent as firm events */
-    TR_UNTRANS_EVENT		/* EVENT+up and down+no translation */
-};
-
-/* Return the current keyboard translation */
-#define KIOCGTRANS  _IOR('k', 5, int)
-/* Set the keyboard translation */
-#define KIOCTRANS   _IOW('k', 0, int)
-
-/* Send a keyboard command */
-#define KIOCCMD     _IOW('k', 8, int)
-
-/* Return if keystrokes are being sent to /dev/kbd */
-
-/* Set routing of keystrokes to /dev/kbd */
-#define KIOCSDIRECT _IOW('k', 10, int)
-
-/* Set keyboard leds */
-#define KIOCSLED    _IOW('k', 14, unsigned char)
-
-/* Get keyboard leds */
-#define KIOCGLED    _IOR('k', 15, unsigned char)
-
-/* Used by KIOC[GS]RATE */
-struct kbd_rate {
-	unsigned char delay;	/* Delay in Hz before first repeat.	*/
-	unsigned char rate;	/* In characters per second (0..50).	*/
-};
-
-/* Set keyboard rate */
-#define KIOCSRATE   _IOW('k', 40, struct kbd_rate)
-
-/* Get keyboard rate */
-#define KIOCGRATE   _IOW('k', 41, struct kbd_rate)
-
-/* Top bit records if the key is up or down */
-#define KBD_UP      0x80
-
-/* Usable information */
-#define KBD_KEYMASK 0x7f
-
-/* All keys up */
-#define KBD_IDLE    0x75
-
-#endif /* __LINUX_KBIO_H */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 87c43c6..08ba72d 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -87,37 +87,35 @@
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
+	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */ 
 	spin_lock(&mm->page_table_lock);
-	if (CTX_VALID(mm->context))
-		ctx_valid = 1;
-        else
-		ctx_valid = 0;
+	ctx_valid = CTX_VALID(mm->context);
+	if (!ctx_valid)
+		get_new_mmu_context(mm);
+	spin_unlock(&mm->page_table_lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
-		if (!ctx_valid)
-			get_new_mmu_context(mm);
-
 		load_secondary_context(mm);
 		reload_tlbmiss_state(tsk, mm);
 	}
 
-	{
-		int cpu = smp_processor_id();
-
-		/* Even if (mm == old_mm) we _must_ check
-		 * the cpu_vm_mask.  If we do not we could
-		 * corrupt the TLB state because of how
-		 * smp_flush_tlb_{page,range,mm} on sparc64
-		 * and lazy tlb switches work. -DaveM
-		 */
-		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-			cpu_set(cpu, mm->cpu_vm_mask);
-			__flush_tlb_mm(CTX_HWBITS(mm->context),
-				       SECONDARY_CONTEXT);
-		}
+	/* Even if (mm == old_mm) we _must_ check
+	 * the cpu_vm_mask.  If we do not we could
+	 * corrupt the TLB state because of how
+	 * smp_flush_tlb_{page,range,mm} on sparc64
+	 * and lazy tlb switches work. -DaveM
+	 */
+	cpu = smp_processor_id();
+	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+		cpu_set(cpu, mm->cpu_vm_mask);
+		__flush_tlb_mm(CTX_HWBITS(mm->context),
+			       SECONDARY_CONTEXT);
 	}
-	spin_unlock(&mm->page_table_lock);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -127,6 +125,10 @@
 {
 	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */ 
 	spin_lock(&mm->page_table_lock);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
diff --git a/include/asm-sparc64/termios.h b/include/asm-sparc64/termios.h
index 9777a9c..ee26a07 100644
--- a/include/asm-sparc64/termios.h
+++ b/include/asm-sparc64/termios.h
@@ -38,15 +38,6 @@
 	int st_columns; /* Columns on the terminal */
 };
 
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
 struct winsize {
 	unsigned short ws_row;
 	unsigned short ws_col;
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 66138d9..61c0188 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -58,11 +58,9 @@
 static inline void tlb_flush_mmu(struct mmu_gather *mp)
 {
 	if (mp->need_flush) {
+		free_pages_and_swap_cache(mp->pages, mp->pages_nr);
+		mp->pages_nr = 0;
 		mp->need_flush = 0;
-		if (!tlb_fast_mode(mp)) {
-			free_pages_and_swap_cache(mp->pages, mp->pages_nr);
-			mp->pages_nr = 0;
-		}
 	}
 
 }
@@ -78,11 +76,9 @@
 {
 	tlb_flush_mmu(mp);
 
-	if (mp->fullmm) {
-		if (CTX_VALID(mp->mm->context))
-			do_flush_tlb_mm(mp->mm);
+	if (mp->fullmm)
 		mp->fullmm = 0;
-	} else
+	else
 		flush_tlb_pending();
 
 	/* keep the page table cache within bounds */
@@ -93,11 +89,11 @@
 
 static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
 {
-	mp->need_flush = 1;
 	if (tlb_fast_mode(mp)) {
 		free_page_and_swap_cache(page);
 		return;
 	}
+	mp->need_flush = 1;
 	mp->pages[mp->pages_nr++] = page;
 	if (mp->pages_nr >= FREE_PTE_NR)
 		tlb_flush_mmu(mp);
diff --git a/include/asm-sparc64/vuid_event.h b/include/asm-sparc64/vuid_event.h
deleted file mode 100644
index 9ef4d17..0000000
--- a/include/asm-sparc64/vuid_event.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SunOS Virtual User Input Device (VUID) compatibility */
-
-typedef struct firm_event {
-	unsigned short id;	  /* tag for this event */
-	unsigned char  pair_type; /* unused by X11 */
-        unsigned char  pair;	  /* unused by X11 */
-        int            value;	  /* VKEY_UP, VKEY_DOWN or delta */
-        struct timeval time;
-} Firm_event;
-
-enum {
-    FE_PAIR_NONE,
-    FE_PAIR_SET,
-    FE_PAIR_DELTA,
-    FE_PAIR_ABSOLUTE
-};
-
-/* VUID stream formats */
-#define VUID_NATIVE     0	/* Native byte stream format */
-#define VUID_FIRM_EVENT 1	/* send firm_event structures */
-
-/* ioctls */
-    /* Set input device byte stream format (any of VUID_{NATIVE,FIRM_EVENT}) */
-#define VUIDSFORMAT   _IOW('v', 1, int)
-    /* Retrieve input device byte stream format */
-#define VUIDGFORMAT   _IOR('v', 2, int)
-
-/* Possible tag values */
-/*    mouse buttons: */
-#define MS_LEFT         0x7f20
-#define MS_MIDDLE       0x7f21
-#define MS_RIGHT        0x7f22
-/*    motion: */
-#define LOC_X_DELTA     0x7f80
-#define LOC_Y_DELTA     0x7f81
-#define LOC_X_ABSOLUTE  0x7f82  /* X compat, unsupported */
-#define LOC_Y_ABSOLUTE  0x7f83  /* X compat, unsupported */
-
-#define VKEY_UP   0
-#define VKEY_DOWN 1
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 19f7046..93c5b3c 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -117,12 +117,15 @@
 #include <linux/config.h>
 
 #ifdef CONFIG_BSD_PROCESS_ACCT
+struct vfsmount;
 struct super_block;
+extern void acct_auto_close_mnt(struct vfsmount *m);
 extern void acct_auto_close(struct super_block *sb);
 extern void acct_process(long exitcode);
 extern void acct_update_integrals(struct task_struct *tsk);
 extern void acct_clear_integrals(struct task_struct *tsk);
 #else
+#define acct_auto_close_mnt(x)	do { } while (0)
 #define acct_auto_close(x)	do { } while (0)
 #define acct_process(x)		do { } while (0)
 #define acct_update_integrals(x)		do { } while (0)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index ab04b4f..46a2ba6 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -329,6 +329,7 @@
 }
 
 extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
+extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
 extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
 
 extern int sysctl_vfs_cache_pressure;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a593ef..1b5f502 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -104,6 +104,10 @@
 #define MS_MOVE		8192
 #define MS_REC		16384
 #define MS_VERBOSE	32768
+#define MS_UNBINDABLE	(1<<17)	/* change to unbindable */
+#define MS_PRIVATE	(1<<18)	/* change to private */
+#define MS_SLAVE	(1<<19)	/* change to slave */
+#define MS_SHARED	(1<<20)	/* change to shared */
 #define MS_POSIXACL	(1<<16)	/* VFS does not apply the umask */
 #define MS_ACTIVE	(1<<30)
 #define MS_NOUSER	(1<<31)
@@ -1249,7 +1253,12 @@
 extern struct vfsmount *kern_mount(struct file_system_type *);
 extern int may_umount_tree(struct vfsmount *);
 extern int may_umount(struct vfsmount *);
+extern void umount_tree(struct vfsmount *, int, struct list_head *);
+extern void release_mounts(struct list_head *);
 extern long do_mount(char *, char *, char *, unsigned long, void *);
+extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
+extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
+				  struct vfsmount *);
 
 extern int vfs_statfs(struct super_block *, struct kstatfs *);
 
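The four MS_* values added to include/linux/fs.h above are the user-visible
knobs for changing the propagation type of an existing mount.  A minimal
user-space sketch, assuming only the flag values defined in this hunk (a C
library of this vintage will not know them, hence the local fallback
definition); the device name, filesystem type and data arguments are
presumably ignored for a pure propagation change:

	#include <stdio.h>
	#include <sys/mount.h>

	#ifndef MS_SHARED
	#define MS_SHARED	(1<<20)		/* change to shared */
	#endif

	int main(void)
	{
		/* Mark the existing mount at /mnt as shared. */
		if (mount("none", "/mnt", NULL, MS_SHARED, NULL) != 0) {
			perror("mount(MS_SHARED)");
			return 1;
		}
		return 0;
	}
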
diff --git a/include/linux/mount.h b/include/linux/mount.h
index f8f3993..dd4e83e 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -17,12 +17,14 @@
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 
-#define MNT_NOSUID	1
-#define MNT_NODEV	2
-#define MNT_NOEXEC	4
+#define MNT_NOSUID	0x01
+#define MNT_NODEV	0x02
+#define MNT_NOEXEC	0x04
+#define MNT_SHARED	0x10	/* if the vfsmount is a shared mount */
+#define MNT_UNBINDABLE	0x20	/* if the vfsmount is an unbindable mount */
+#define MNT_PNODE_MASK	0x30	/* propagation flag mask */
 
-struct vfsmount
-{
+struct vfsmount {
 	struct list_head mnt_hash;
 	struct vfsmount *mnt_parent;	/* fs we are mounted on */
 	struct dentry *mnt_mountpoint;	/* dentry of mountpoint */
@@ -36,7 +38,12 @@
 	char *mnt_devname;		/* Name of device e.g. /dev/dsk/hda1 */
 	struct list_head mnt_list;
 	struct list_head mnt_expire;	/* link in fs-specific expiry list */
+	struct list_head mnt_share;	/* circular list of shared mounts */
+	struct list_head mnt_slave_list;/* list of slave mounts */
+	struct list_head mnt_slave;	/* slave list entry */
+	struct vfsmount *mnt_master;	/* slave is on master->mnt_slave_list */
 	struct namespace *mnt_namespace; /* containing namespace */
+	int mnt_pinned;
 };
 
 static inline struct vfsmount *mntget(struct vfsmount *mnt)
@@ -46,15 +53,9 @@
 	return mnt;
 }
 
-extern void __mntput(struct vfsmount *mnt);
-
-static inline void mntput_no_expire(struct vfsmount *mnt)
-{
-	if (mnt) {
-		if (atomic_dec_and_test(&mnt->mnt_count))
-			__mntput(mnt);
-	}
-}
+extern void mntput_no_expire(struct vfsmount *mnt);
+extern void mnt_pin(struct vfsmount *mnt);
+extern void mnt_unpin(struct vfsmount *mnt);
 
 static inline void mntput(struct vfsmount *mnt)
 {
diff --git a/include/linux/namespace.h b/include/linux/namespace.h
index 0e5a86f..6731977 100644
--- a/include/linux/namespace.h
+++ b/include/linux/namespace.h
@@ -9,7 +9,8 @@
 	atomic_t		count;
 	struct vfsmount *	root;
 	struct list_head	list;
-	struct rw_semaphore	sem;
+	wait_queue_head_t poll;
+	int event;
 };
 
 extern int copy_namespace(int, struct task_struct *);
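
The rw_semaphore that used to serialize namespace modifications is gone; the
new poll waitqueue and event counter suggest that a namespace's mount table
becomes pollable.  Assuming these fields end up backing poll support for
/proc/mounts (that wiring is not part of this hunk), a user-space watcher
would look roughly like the following sketch:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct pollfd pfd;

		pfd.fd = open("/proc/mounts", O_RDONLY);
		if (pfd.fd < 0) {
			perror("open /proc/mounts");
			return 1;
		}
		pfd.events = POLLPRI;

		for (;;) {
			if (poll(&pfd, 1, -1) < 0) {
				perror("poll");
				return 1;
			}
			if (pfd.revents & (POLLERR | POLLPRI)) {
				/* A real watcher would re-read the file here. */
				printf("mount table changed\n");
			}
		}
	}
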
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 700ead4..f33aeb2 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -289,7 +289,6 @@
 	struct semaphore dqonoff_sem;		/* Serialize quotaon & quotaoff */
 	struct rw_semaphore dqptr_sem;		/* serialize ops using quota_info struct, pointers from inode to dquots */
 	struct inode *files[MAXQUOTAS];		/* inodes of quotafiles */
-	struct vfsmount *mnt[MAXQUOTAS];	/* mountpoint entries of filesystems with quota files */
 	struct mem_dqinfo info[MAXQUOTAS];	/* Information for each quota type */
 	struct quota_format_ops *ops[MAXQUOTAS];	/* Operations for each type */
 };
diff --git a/kernel/acct.c b/kernel/acct.c
index 2e3f4a4..6312d6b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -54,6 +54,7 @@
 #include <linux/jiffies.h>
 #include <linux/times.h>
 #include <linux/syscalls.h>
+#include <linux/mount.h>
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <linux/blkdev.h> /* sector_div */
@@ -192,6 +193,7 @@
 		add_timer(&acct_globals.timer);
 	}
 	if (old_acct) {
+		mnt_unpin(old_acct->f_vfsmnt);
 		spin_unlock(&acct_globals.lock);
 		do_acct_process(0, old_acct);
 		filp_close(old_acct, NULL);
@@ -199,6 +201,42 @@
 	}
 }
 
+static int acct_on(char *name)
+{
+	struct file *file;
+	int error;
+
+	/* Difference from BSD - they don't do O_APPEND */
+	file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
+		filp_close(file, NULL);
+		return -EACCES;
+	}
+
+	if (!file->f_op->write) {
+		filp_close(file, NULL);
+		return -EIO;
+	}
+
+	error = security_acct(file);
+	if (error) {
+		filp_close(file, NULL);
+		return error;
+	}
+
+	spin_lock(&acct_globals.lock);
+	mnt_pin(file->f_vfsmnt);
+	acct_file_reopen(file);
+	spin_unlock(&acct_globals.lock);
+
+	mntput(file->f_vfsmnt);	/* it's pinned, now give up active reference */
+
+	return 0;
+}
+
 /**
  * sys_acct - enable/disable process accounting
  * @name: file name for accounting records or NULL to shutdown accounting
@@ -212,47 +250,41 @@
  */
 asmlinkage long sys_acct(const char __user *name)
 {
-	struct file *file = NULL;
-	char *tmp;
 	int error;
 
 	if (!capable(CAP_SYS_PACCT))
 		return -EPERM;
 
 	if (name) {
-		tmp = getname(name);
-		if (IS_ERR(tmp)) {
+		char *tmp = getname(name);
+		if (IS_ERR(tmp))
 			return (PTR_ERR(tmp));
-		}
-		/* Difference from BSD - they don't do O_APPEND */
-		file = filp_open(tmp, O_WRONLY|O_APPEND|O_LARGEFILE, 0);
+		error = acct_on(tmp);
 		putname(tmp);
-		if (IS_ERR(file)) {
-			return (PTR_ERR(file));
-		}
-		if (!S_ISREG(file->f_dentry->d_inode->i_mode)) {
-			filp_close(file, NULL);
-			return (-EACCES);
-		}
-
-		if (!file->f_op->write) {
-			filp_close(file, NULL);
-			return (-EIO);
+	} else {
+		error = security_acct(NULL);
+		if (!error) {
+			spin_lock(&acct_globals.lock);
+			acct_file_reopen(NULL);
+			spin_unlock(&acct_globals.lock);
 		}
 	}
+	return error;
+}
 
-	error = security_acct(file);
-	if (error) {
-		if (file)
-			filp_close(file, NULL);
-		return error;
-	}
-
+/**
+ * acct_auto_close_mnt - turn off a filesystem's accounting if it is on
+ * @m: vfsmount being shut down
+ *
+ * If the accounting is turned on for a file in the subtree pointed to
+ * by m, turn accounting off.  Done when m is about to die.
+ */
+void acct_auto_close_mnt(struct vfsmount *m)
+{
 	spin_lock(&acct_globals.lock);
-	acct_file_reopen(file);
+	if (acct_globals.file && acct_globals.file->f_vfsmnt == m)
+		acct_file_reopen(NULL);
 	spin_unlock(&acct_globals.lock);
-
-	return (0);
 }
 
 /**
@@ -266,8 +298,8 @@
 {
 	spin_lock(&acct_globals.lock);
 	if (acct_globals.file &&
-	    acct_globals.file->f_dentry->d_inode->i_sb == sb) {
-		acct_file_reopen((struct file *)NULL);
+	    acct_globals.file->f_vfsmnt->mnt_sb == sb) {
+		acct_file_reopen(NULL);
 	}
 	spin_unlock(&acct_globals.lock);
 }
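
The user-space side of the reworked accounting entry point is unchanged:
accounting is switched on by passing a pathname to acct(2) and off by
passing NULL.  A minimal sketch (the pathname is only an example;
CAP_SYS_PACCT is required and the target must be a regular file opened
writable; depending on the C library a feature-test macro such as
_BSD_SOURCE may be needed for the prototype):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		if (acct("/var/log/account/pacct") != 0) {	/* enable */
			perror("acct(on)");
			return 1;
		}

		/* ... run some work; a record is appended on each exit ... */

		if (acct(NULL) != 0) {				/* disable */
			perror("acct(off)");
			return 1;
		}
		return 0;
	}
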
diff --git a/kernel/fork.c b/kernel/fork.c
index efac2c5..158710d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -470,13 +470,6 @@
 	if (clone_flags & CLONE_VM) {
 		atomic_inc(&oldmm->mm_users);
 		mm = oldmm;
-		/*
-		 * There are cases where the PTL is held to ensure no
-		 * new threads start up in user mode using an mm, which
-		 * allows optimizing out ipis; the tlb_gather_mmu code
-		 * is an example.
-		 */
-		spin_unlock_wait(&oldmm->page_table_lock);
 		goto good_mm;
 	}
 
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index f4361c5..1f8d27a 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -61,13 +61,37 @@
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}");
 
-typedef struct snd_cs4231 {
+#ifdef SBUS_SUPPORT
+typedef struct sbus_dma_info {
+       spinlock_t      lock;
+       int             dir;
+       void __iomem    *regs;
+} sbus_dma_info_t;
+#endif
+
+typedef struct snd_cs4231 cs4231_t;
+
+typedef struct cs4231_dma_control {
+        void		(*prepare)(struct cs4231_dma_control *dma_cont, int dir);
+        void		(*enable)(struct cs4231_dma_control *dma_cont, int on);
+        int		(*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len);
+        unsigned int	(*address)(struct cs4231_dma_control *dma_cont);
+        void		(*reset)(cs4231_t *chip); 
+        void		(*preallocate)(cs4231_t *chip, snd_pcm_t *pcm); 
+#ifdef EBUS_SUPPORT
+	struct		ebus_dma_info	ebus_info;
+#endif
+#ifdef SBUS_SUPPORT
+	struct		sbus_dma_info	sbus_info;
+#endif
+} cs4231_dma_control_t;
+
+struct snd_cs4231 {
 	spinlock_t		lock;
 	void __iomem		*port;
-#ifdef EBUS_SUPPORT
-	struct ebus_dma_info	eb2c;
-	struct ebus_dma_info	eb2p;
-#endif
+
+	cs4231_dma_control_t	p_dma;
+	cs4231_dma_control_t	c_dma;
 
 	u32			flags;
 #define CS4231_FLAG_EBUS	0x00000001
@@ -106,7 +130,7 @@
 	unsigned int		irq[2];
 	unsigned int		regs_size;
 	struct snd_cs4231	*next;
-} cs4231_t;
+};
 
 static cs4231_t *cs4231_list;
 
@@ -251,6 +275,15 @@
 #define APCPNVA	0x38UL	/* APC Play DMA Next Address */
 #define APCPNC	0x3cUL	/* APC Play Next Count */
 
+/* Defines for SBUS DMA-routines */
+
+#define APCVA  0x0UL	/* APC DMA Address */
+#define APCC   0x4UL	/* APC Count */
+#define APCNVA 0x8UL	/* APC DMA Next Address */
+#define APCNC  0xcUL	/* APC Next Count */
+#define APC_PLAY 0x30UL	/* Play registers start at 0x30 */
+#define APC_RECORD 0x20UL /* Record registers start at 0x20 */
+
 /* APCCSR bits */
 
 #define APC_INT_PENDING 0x800000 /* Interrupt Pending */
@@ -569,8 +602,7 @@
 	spin_unlock_irqrestore(&chip->lock, flags);
 }
 
-#ifdef EBUS_SUPPORT
-static void snd_cs4231_ebus_advance_dma(struct ebus_dma_info *p, snd_pcm_substream_t *substream, unsigned int *periods_sent)
+static void snd_cs4231_advance_dma(struct cs4231_dma_control *dma_cont, snd_pcm_substream_t *substream, unsigned int *periods_sent)
 {
 	snd_pcm_runtime_t *runtime = substream->runtime;
 
@@ -581,129 +613,41 @@
 		if (period_size >= (1 << 24))
 			BUG();
 
-		if (ebus_dma_request(p, runtime->dma_addr + offset, period_size))
+		if (dma_cont->request(dma_cont, runtime->dma_addr + offset, period_size))
 			return;
 		(*periods_sent) = ((*periods_sent) + 1) % runtime->periods;
 	}
 }
-#endif
-
-#ifdef SBUS_SUPPORT
-static void snd_cs4231_sbus_advance_dma(snd_pcm_substream_t *substream, unsigned int *periods_sent)
-{
-	cs4231_t *chip = snd_pcm_substream_chip(substream);
-	snd_pcm_runtime_t *runtime = substream->runtime;
-
-	unsigned int period_size = snd_pcm_lib_period_bytes(substream);
-	unsigned int offset = period_size * (*periods_sent % runtime->periods);
-
-	if (runtime->period_size > 0xffff + 1)
-		BUG();
-
-	switch (substream->stream) {
-	case SNDRV_PCM_STREAM_PLAYBACK:
-		sbus_writel(runtime->dma_addr + offset, chip->port + APCPNVA);
-		sbus_writel(period_size, chip->port + APCPNC);
-		break;
-	case SNDRV_PCM_STREAM_CAPTURE:
-		sbus_writel(runtime->dma_addr + offset, chip->port + APCCNVA);
-		sbus_writel(period_size, chip->port + APCCNC);
-		break;
-	}
-
-	(*periods_sent) = (*periods_sent + 1) % runtime->periods;
-}
-#endif
 
 static void cs4231_dma_trigger(snd_pcm_substream_t *substream, unsigned int what, int on)
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
+	cs4231_dma_control_t *dma_cont;
 
-#ifdef EBUS_SUPPORT
-	if (chip->flags & CS4231_FLAG_EBUS) {
-		if (what & CS4231_PLAYBACK_ENABLE) {
-			if (on) {
-				ebus_dma_prepare(&chip->eb2p, 0);
-				ebus_dma_enable(&chip->eb2p, 1);
-				snd_cs4231_ebus_advance_dma(&chip->eb2p,
-					chip->playback_substream,
-					&chip->p_periods_sent);
-			} else {
-				ebus_dma_enable(&chip->eb2p, 0);
-			}
-		}
-		if (what & CS4231_RECORD_ENABLE) {
-			if (on) {
-				ebus_dma_prepare(&chip->eb2c, 1);
-				ebus_dma_enable(&chip->eb2c, 1);
-				snd_cs4231_ebus_advance_dma(&chip->eb2c,
-					chip->capture_substream,
-					&chip->c_periods_sent);
-			} else {
-				ebus_dma_enable(&chip->eb2c, 0);
-			}
-		}
-	} else {
-#endif
-#ifdef SBUS_SUPPORT
-	u32 csr = sbus_readl(chip->port + APCCSR);
-	/* I don't know why, but on sbus the period counter must
-	 * only start counting after the first period is sent.
-	 * Therefore this dummy thing.
-	 */
-	unsigned int dummy = 0;
-
-	switch (what) {
-	case CS4231_PLAYBACK_ENABLE:
+	if (what & CS4231_PLAYBACK_ENABLE) {
+		dma_cont = &chip->p_dma;
 		if (on) {
-			csr &= ~APC_XINT_PLAY;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			csr &= ~APC_PPAUSE;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			snd_cs4231_sbus_advance_dma(substream, &dummy);
-
-			csr |=  APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA |
-				APC_XINT_PLAY | APC_XINT_EMPT | APC_XINT_GENL |
-				APC_XINT_PENA | APC_PDMA_READY;
-			sbus_writel(csr, chip->port + APCCSR);
+			dma_cont->prepare(dma_cont, 0);
+			dma_cont->enable(dma_cont, 1);
+			snd_cs4231_advance_dma(dma_cont,
+				chip->playback_substream,
+				&chip->p_periods_sent);
 		} else {
-			csr |= APC_PPAUSE;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			csr &= ~APC_PDMA_READY;
-			sbus_writel(csr, chip->port + APCCSR);
+			dma_cont->enable(dma_cont, 0);
 		}
-		break;
-	case CS4231_RECORD_ENABLE:
+	}
+	if (what & CS4231_RECORD_ENABLE) {
+		dma_cont = &chip->c_dma;
 		if (on) {
-			csr &= ~APC_XINT_CAPT;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			csr &= ~APC_CPAUSE;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			snd_cs4231_sbus_advance_dma(substream, &dummy);
-
-			csr |=  APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA |
-				APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL |
-				APC_CDMA_READY;
-
-			sbus_writel(csr, chip->port + APCCSR);
+			dma_cont->prepare(dma_cont, 1);
+			dma_cont->enable(dma_cont, 1);
+			snd_cs4231_advance_dma(dma_cont,
+				chip->capture_substream,
+				&chip->c_periods_sent);
 		} else {
-			csr |= APC_CPAUSE;
-			sbus_writel(csr, chip->port + APCCSR);
-
-			csr &= ~APC_CDMA_READY;
-			sbus_writel(csr, chip->port + APCCSR);
+			dma_cont->enable(dma_cont, 0);
 		}
-		break;
 	}
-#endif
-#ifdef EBUS_SUPPORT
-	}
-#endif
 }
 
 static int snd_cs4231_trigger(snd_pcm_substream_t *substream, int cmd)
@@ -1136,10 +1080,7 @@
 	if (runtime->period_size > 0xffff + 1)
 		BUG();
 
-	snd_cs4231_out(chip, CS4231_PLY_LWR_CNT, (runtime->period_size - 1) & 0x00ff);
-	snd_cs4231_out(chip, CS4231_PLY_UPR_CNT, (runtime->period_size - 1) >> 8 & 0x00ff);
 	chip->p_periods_sent = 0;
-
 	spin_unlock_irqrestore(&chip->lock, flags);
 
 	return 0;
@@ -1171,16 +1112,14 @@
 static int snd_cs4231_capture_prepare(snd_pcm_substream_t *substream)
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
-	snd_pcm_runtime_t *runtime = substream->runtime;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chip->lock, flags);
 	chip->image[CS4231_IFACE_CTRL] &= ~(CS4231_RECORD_ENABLE |
 					    CS4231_RECORD_PIO);
 
-	snd_cs4231_out(chip, CS4231_REC_LWR_CNT, (runtime->period_size - 1) & 0x00ff);
-	snd_cs4231_out(chip, CS4231_REC_LWR_CNT, (runtime->period_size - 1) >> 8 & 0x00ff);
 
+	chip->c_periods_sent = 0;
 	spin_unlock_irqrestore(&chip->lock, flags);
 
 	return 0;
@@ -1199,134 +1138,55 @@
 		chip->capture_substream->runtime->overrange++;
 }
 
-static irqreturn_t snd_cs4231_generic_interrupt(cs4231_t *chip)
-{
-	unsigned long flags;
-	unsigned char status;
-
-	/*This is IRQ is not raised by the cs4231*/
-	if (!(__cs4231_readb(chip, CS4231P(chip, STATUS)) & CS4231_GLOBALIRQ))
-		return IRQ_NONE;
-
-	status = snd_cs4231_in(chip, CS4231_IRQ_STATUS);
-
-	if (status & CS4231_TIMER_IRQ) {
-		if (chip->timer)
-			snd_timer_interrupt(chip->timer, chip->timer->sticks);
-	}		
-
-	if (status & CS4231_RECORD_IRQ)
-		snd_cs4231_overrange(chip);
-
-	/* ACK the CS4231 interrupt. */
-	spin_lock_irqsave(&chip->lock, flags);
-	snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0);
-	spin_unlock_irqrestore(&chip->lock, flags);
-
-	return 0;
-}
-
-#ifdef SBUS_SUPPORT
-static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	cs4231_t *chip = dev_id;
-
-	/* ACK the APC interrupt. */
-	u32 csr = sbus_readl(chip->port + APCCSR);
-
-	sbus_writel(csr, chip->port + APCCSR);
-
-	if ((chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) &&
-	    (csr & APC_PLAY_INT) &&
-	    (csr & APC_XINT_PNVA) &&
-	    !(csr & APC_XINT_EMPT)) {
-		snd_cs4231_sbus_advance_dma(chip->playback_substream,
-					    &chip->p_periods_sent);
-		snd_pcm_period_elapsed(chip->playback_substream);
-	}
-
-	if ((chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) &&
-	    (csr & APC_CAPT_INT) &&
-	    (csr & APC_XINT_CNVA)) {
-		snd_cs4231_sbus_advance_dma(chip->capture_substream,
-					    &chip->c_periods_sent);
-		snd_pcm_period_elapsed(chip->capture_substream);
-	}
-
-	return snd_cs4231_generic_interrupt(chip);
-}
-#endif
-
-#ifdef EBUS_SUPPORT
-static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event, void *cookie)
+static void snd_cs4231_play_callback(cs4231_t *cookie)
 {
 	cs4231_t *chip = cookie;
 
 	if (chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE) {
 		snd_pcm_period_elapsed(chip->playback_substream);
-		snd_cs4231_ebus_advance_dma(p, chip->playback_substream,
+		snd_cs4231_advance_dma(&chip->p_dma, chip->playback_substream,
 					    &chip->p_periods_sent);
 	}
 }
 
-static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p, int event, void *cookie)
+static void snd_cs4231_capture_callback(cs4231_t *cookie)
 {
 	cs4231_t *chip = cookie;
 
 	if (chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE) {
 		snd_pcm_period_elapsed(chip->capture_substream);
-		snd_cs4231_ebus_advance_dma(p, chip->capture_substream,
+		snd_cs4231_advance_dma(&chip->c_dma, chip->capture_substream,
 					    &chip->c_periods_sent);
 	}
 }
-#endif
 
 static snd_pcm_uframes_t snd_cs4231_playback_pointer(snd_pcm_substream_t *substream)
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
-	size_t ptr, residue, period_bytes;
-
+	cs4231_dma_control_t *dma_cont = &chip->p_dma;
+	size_t ptr;
+	
 	if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_PLAYBACK_ENABLE))
 		return 0;
-	period_bytes = snd_pcm_lib_period_bytes(substream);
-	ptr = period_bytes * chip->p_periods_sent;
-#ifdef EBUS_SUPPORT
-	if (chip->flags & CS4231_FLAG_EBUS) {
-		residue = ebus_dma_residue(&chip->eb2p);
-	} else {
-#endif
-#ifdef SBUS_SUPPORT
-		residue = sbus_readl(chip->port + APCPC);
-#endif
-#ifdef EBUS_SUPPORT
-	}
-#endif
-	ptr += period_bytes - residue;
-
+	ptr = dma_cont->address(dma_cont);
+	if (ptr != 0)
+		ptr -= substream->runtime->dma_addr;
+	
 	return bytes_to_frames(substream->runtime, ptr);
 }
 
 static snd_pcm_uframes_t snd_cs4231_capture_pointer(snd_pcm_substream_t * substream)
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
-	size_t ptr, residue, period_bytes;
+	cs4231_dma_control_t *dma_cont = &chip->c_dma;
+	size_t ptr;
 	
 	if (!(chip->image[CS4231_IFACE_CTRL] & CS4231_RECORD_ENABLE))
 		return 0;
-	period_bytes = snd_pcm_lib_period_bytes(substream);
-	ptr = period_bytes * chip->c_periods_sent;
-#ifdef EBUS_SUPPORT
-	if (chip->flags & CS4231_FLAG_EBUS) {
-		residue = ebus_dma_residue(&chip->eb2c);
-	} else {
-#endif
-#ifdef SBUS_SUPPORT
-		residue = sbus_readl(chip->port + APCCC);
-#endif
-#ifdef EBUS_SUPPORT
-	}
-#endif
-	ptr += period_bytes - residue;
+	ptr = dma_cont->address(dma_cont);
+	if (ptr != 0)
+		ptr -= substream->runtime->dma_addr;
+	
 	return bytes_to_frames(substream->runtime, ptr);
 }
 
@@ -1362,30 +1222,8 @@
 	spin_lock_irqsave(&chip->lock, flags);
 
 
-	/* Reset DMA engine.  */
-#ifdef EBUS_SUPPORT
-	if (chip->flags & CS4231_FLAG_EBUS) {
-		/* Done by ebus_dma_register */
-	} else {
-#endif
-#ifdef SBUS_SUPPORT
-                sbus_writel(APC_CHIP_RESET, chip->port + APCCSR);
-                sbus_writel(0x00, chip->port + APCCSR);
-                sbus_writel(sbus_readl(chip->port + APCCSR) | APC_CDC_RESET,
-			    chip->port + APCCSR);
-  
-                udelay(20);
-  
-                sbus_writel(sbus_readl(chip->port + APCCSR) & ~APC_CDC_RESET,
-			    chip->port + APCCSR);
-                sbus_writel(sbus_readl(chip->port + APCCSR) | (APC_XINT_ENA |
-							       APC_XINT_PENA |
-							       APC_XINT_CENA),
-			    chip->port + APCCSR);
-#endif
-#ifdef EBUS_SUPPORT
-	}
-#endif
+	/* Reset DMA engine (sbus only).  */
+	chip->p_dma.reset(chip);
 
 	__cs4231_readb(chip, CS4231P(chip, STATUS));	/* clear any pendings IRQ */
 	__cs4231_writeb(chip, 0, CS4231P(chip, STATUS));
@@ -1505,8 +1343,8 @@
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
 
-	chip->playback_substream = NULL;
 	snd_cs4231_close(chip, CS4231_MODE_PLAY);
+	chip->playback_substream = NULL;
 
 	return 0;
 }
@@ -1515,8 +1353,8 @@
 {
 	cs4231_t *chip = snd_pcm_substream_chip(substream);
 
-	chip->capture_substream = NULL;
 	snd_cs4231_close(chip, CS4231_MODE_RECORD);
+	chip->capture_substream = NULL;
 
 	return 0;
 }
@@ -1571,21 +1409,7 @@
 	pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
 	strcpy(pcm->name, "CS4231");
 
-#ifdef EBUS_SUPPORT
-	if (chip->flags & CS4231_FLAG_EBUS) {
-		snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
-						      snd_dma_pci_data(chip->dev_u.pdev),
-						      64*1024, 128*1024);
-	} else {
-#endif
-#ifdef SBUS_SUPPORT
-		snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS,
-						      snd_dma_sbus_data(chip->dev_u.sdev),
-						      64*1024, 128*1024);
-#endif
-#ifdef EBUS_SUPPORT
-	}
-#endif
+	chip->p_dma.preallocate(chip, pcm);
 
 	chip->pcm = pcm;
 
@@ -1942,6 +1766,180 @@
 }
 
 #ifdef SBUS_SUPPORT
+
+static irqreturn_t snd_cs4231_sbus_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	unsigned long flags;
+	unsigned char status;
+	u32 csr;
+	cs4231_t *chip = dev_id;
+
+	/* This IRQ is not raised by the cs4231 */
+	if (!(__cs4231_readb(chip, CS4231P(chip, STATUS)) & CS4231_GLOBALIRQ))
+		return IRQ_NONE;
+
+	/* ACK the APC interrupt. */
+	csr = sbus_readl(chip->port + APCCSR);
+
+	sbus_writel(csr, chip->port + APCCSR);
+
+	if ((csr & APC_PDMA_READY) && 
+ 	    (csr & APC_PLAY_INT) &&
+	    (csr & APC_XINT_PNVA) &&
+	    !(csr & APC_XINT_EMPT))
+			snd_cs4231_play_callback(chip);
+
+	if ((csr & APC_CDMA_READY) && 
+  	    (csr & APC_CAPT_INT) &&
+	    (csr & APC_XINT_CNVA) &&
+	    !(csr & APC_XINT_EMPT))
+			snd_cs4231_capture_callback(chip);
+	
+	status = snd_cs4231_in(chip, CS4231_IRQ_STATUS);
+
+	if (status & CS4231_TIMER_IRQ) {
+		if (chip->timer)
+			snd_timer_interrupt(chip->timer, chip->timer->sticks);
+	}		
+
+	if ((status & CS4231_RECORD_IRQ) && (csr & APC_CDMA_READY))
+		snd_cs4231_overrange(chip);
+
+	/* ACK the CS4231 interrupt. */
+	spin_lock_irqsave(&chip->lock, flags);
+	snd_cs4231_outm(chip, CS4231_IRQ_STATUS, ~CS4231_ALL_IRQS | ~status, 0);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+/*
+ * SBUS DMA routines
+ */
+
+int sbus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len)
+{
+	unsigned long flags;
+	u32 test, csr;
+	int err;
+	sbus_dma_info_t *base = &dma_cont->sbus_info;
+	
+	if (len >= (1 << 24))
+		return -EINVAL;
+	spin_lock_irqsave(&base->lock, flags);
+	csr = sbus_readl(base->regs + APCCSR);
+	err = -EINVAL;
+	test = APC_CDMA_READY;
+	if ( base->dir == APC_PLAY )
+		test = APC_PDMA_READY;
+	if (!(csr & test))
+		goto out;
+	err = -EBUSY;
+	csr = sbus_readl(base->regs + APCCSR);
+	test = APC_XINT_CNVA;
+	if ( base->dir == APC_PLAY )
+		test = APC_XINT_PNVA;
+	if (!(csr & test))
+		goto out;
+	err = 0;
+	sbus_writel(bus_addr, base->regs + base->dir + APCNVA);
+	sbus_writel(len, base->regs + base->dir + APCNC);
+out:
+	spin_unlock_irqrestore(&base->lock, flags);
+	return err;
+}
+
+void sbus_dma_prepare(struct cs4231_dma_control *dma_cont, int d)
+{
+	unsigned long flags;
+	u32 csr, test;
+	sbus_dma_info_t *base = &dma_cont->sbus_info;
+
+	spin_lock_irqsave(&base->lock, flags);
+	csr = sbus_readl(base->regs + APCCSR);
+	test =  APC_GENL_INT | APC_PLAY_INT | APC_XINT_ENA |
+		APC_XINT_PLAY | APC_XINT_PEMP | APC_XINT_GENL |
+		 APC_XINT_PENA;
+	if ( base->dir == APC_RECORD )
+		test = APC_GENL_INT | APC_CAPT_INT | APC_XINT_ENA |
+			APC_XINT_CAPT | APC_XINT_CEMP | APC_XINT_GENL;
+	csr |= test;
+	sbus_writel(csr, base->regs + APCCSR);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+void sbus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
+{
+	unsigned long flags;
+	u32 csr, shift;
+	sbus_dma_info_t *base = &dma_cont->sbus_info;
+
+	spin_lock_irqsave(&base->lock, flags);
+	if (!on) {
+		if (base->dir == APC_PLAY) { 
+			sbus_writel(0, base->regs + base->dir + APCNVA); 
+			sbus_writel(1, base->regs + base->dir + APCC); 
+		}
+		else
+		{
+			sbus_writel(0, base->regs + base->dir + APCNC); 
+			sbus_writel(0, base->regs + base->dir + APCVA); 
+		} 
+	} 
+	udelay(600); 
+	csr = sbus_readl(base->regs + APCCSR);
+	shift = 0;
+	if ( base->dir == APC_PLAY )
+		shift = 1;
+	if (on)
+		csr &= ~(APC_CPAUSE << shift);
+	else
+		csr |= (APC_CPAUSE << shift); 
+	sbus_writel(csr, base->regs + APCCSR);
+	if (on)
+		csr |= (APC_CDMA_READY << shift);
+	else
+		csr &= ~(APC_CDMA_READY << shift);
+	sbus_writel(csr, base->regs + APCCSR);
+	
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+unsigned int sbus_dma_addr(struct cs4231_dma_control *dma_cont)
+{
+	sbus_dma_info_t *base = &dma_cont->sbus_info;
+
+        return sbus_readl(base->regs + base->dir + APCVA);
+}
+
+void sbus_dma_reset(cs4231_t *chip)
+{
+        sbus_writel(APC_CHIP_RESET, chip->port + APCCSR);
+        sbus_writel(0x00, chip->port + APCCSR);
+        sbus_writel(sbus_readl(chip->port + APCCSR) | APC_CDC_RESET,
+		    chip->port + APCCSR);
+  
+        udelay(20);
+  
+        sbus_writel(sbus_readl(chip->port + APCCSR) & ~APC_CDC_RESET,
+		    chip->port + APCCSR);
+        sbus_writel(sbus_readl(chip->port + APCCSR) | (APC_XINT_ENA |
+		       APC_XINT_PENA |
+		       APC_XINT_CENA),
+	               chip->port + APCCSR);
+}
+
+void sbus_dma_preallocate(cs4231_t *chip, snd_pcm_t *pcm)
+{
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_SBUS,
+					      snd_dma_sbus_data(chip->dev_u.sdev),
+					      64*1024, 128*1024);
+}
+
+/*
+ * Init and exit routines
+ */
+
 static int snd_cs4231_sbus_free(cs4231_t *chip)
 {
 	if (chip->irq[0])
@@ -1983,6 +1981,8 @@
 		return -ENOMEM;
 
 	spin_lock_init(&chip->lock);
+	spin_lock_init(&chip->c_dma.sbus_info.lock);
+	spin_lock_init(&chip->p_dma.sbus_info.lock);
 	init_MUTEX(&chip->mce_mutex);
 	init_MUTEX(&chip->open_mutex);
 	chip->card = card;
@@ -1998,6 +1998,25 @@
 		return -EIO;
 	}
 
+	chip->c_dma.sbus_info.regs = chip->port;
+	chip->p_dma.sbus_info.regs = chip->port;
+	chip->c_dma.sbus_info.dir = APC_RECORD;
+	chip->p_dma.sbus_info.dir = APC_PLAY;
+
+	chip->p_dma.prepare = sbus_dma_prepare;
+	chip->p_dma.enable = sbus_dma_enable;
+	chip->p_dma.request = sbus_dma_request;
+	chip->p_dma.address = sbus_dma_addr;
+	chip->p_dma.reset = sbus_dma_reset;
+	chip->p_dma.preallocate = sbus_dma_preallocate;
+
+	chip->c_dma.prepare = sbus_dma_prepare;
+	chip->c_dma.enable = sbus_dma_enable;
+	chip->c_dma.request = sbus_dma_request;
+	chip->c_dma.address = sbus_dma_addr;
+	chip->c_dma.reset = sbus_dma_reset;
+	chip->c_dma.preallocate = sbus_dma_preallocate;
+
 	if (request_irq(sdev->irqs[0], snd_cs4231_sbus_interrupt,
 			SA_SHIRQ, "cs4231", chip)) {
 		snd_printdd("cs4231-%d: Unable to grab SBUS IRQ %s\n",
@@ -2051,15 +2070,70 @@
 #endif
 
 #ifdef EBUS_SUPPORT
+
+static void snd_cs4231_ebus_play_callback(struct ebus_dma_info *p, int event, void *cookie)
+{
+	cs4231_t *chip = cookie;
+	
+	snd_cs4231_play_callback(chip);
+}
+
+static void snd_cs4231_ebus_capture_callback(struct ebus_dma_info *p, int event, void *cookie)
+{
+	cs4231_t *chip = cookie;
+
+	snd_cs4231_capture_callback(chip);
+}
+
+/*
+ * EBUS DMA wrappers
+ */
+
+int _ebus_dma_request(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len)
+{
+	return ebus_dma_request(&dma_cont->ebus_info, bus_addr, len);
+}
+
+void _ebus_dma_enable(struct cs4231_dma_control *dma_cont, int on)
+{
+	ebus_dma_enable(&dma_cont->ebus_info, on);
+}
+
+void _ebus_dma_prepare(struct cs4231_dma_control *dma_cont, int dir)
+{
+	ebus_dma_prepare(&dma_cont->ebus_info, dir);
+}
+
+unsigned int _ebus_dma_addr(struct cs4231_dma_control *dma_cont)
+{
+	return ebus_dma_addr(&dma_cont->ebus_info);
+}
+
+void _ebus_dma_reset(cs4231_t *chip)
+{
+	return;
+}
+
+void _ebus_dma_preallocate(cs4231_t *chip, snd_pcm_t *pcm)
+{
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				      snd_dma_pci_data(chip->dev_u.pdev),
+				      64*1024, 128*1024);
+}
+
+/*
+ * Init and exit routines
+ */
+
 static int snd_cs4231_ebus_free(cs4231_t *chip)
 {
-	if (chip->eb2c.regs) {
-		ebus_dma_unregister(&chip->eb2c);
-		iounmap(chip->eb2c.regs);
+	if (chip->c_dma.ebus_info.regs) {
+		ebus_dma_unregister(&chip->c_dma.ebus_info);
+		iounmap(chip->c_dma.ebus_info.regs);
 	}
-	if (chip->eb2p.regs) {
-		ebus_dma_unregister(&chip->eb2p);
-		iounmap(chip->eb2p.regs);
+	if (chip->p_dma.ebus_info.regs) {
+		ebus_dma_unregister(&chip->p_dma.ebus_info);
+		iounmap(chip->p_dma.ebus_info.regs);
 	}
 
 	if (chip->port)
@@ -2097,8 +2171,8 @@
 		return -ENOMEM;
 
 	spin_lock_init(&chip->lock);
-	spin_lock_init(&chip->eb2c.lock);
-	spin_lock_init(&chip->eb2p.lock);
+	spin_lock_init(&chip->c_dma.ebus_info.lock);
+	spin_lock_init(&chip->p_dma.ebus_info.lock);
 	init_MUTEX(&chip->mce_mutex);
 	init_MUTEX(&chip->open_mutex);
 	chip->flags |= CS4231_FLAG_EBUS;
@@ -2106,43 +2180,57 @@
 	chip->dev_u.pdev = edev->bus->self;
 	memcpy(&chip->image, &snd_cs4231_original_image,
 	       sizeof(snd_cs4231_original_image));
-	strcpy(chip->eb2c.name, "cs4231(capture)");
-	chip->eb2c.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
-	chip->eb2c.callback = snd_cs4231_ebus_capture_callback;
-	chip->eb2c.client_cookie = chip;
-	chip->eb2c.irq = edev->irqs[0];
-	strcpy(chip->eb2p.name, "cs4231(play)");
-	chip->eb2p.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
-	chip->eb2p.callback = snd_cs4231_ebus_play_callback;
-	chip->eb2p.client_cookie = chip;
-	chip->eb2p.irq = edev->irqs[1];
+	strcpy(chip->c_dma.ebus_info.name, "cs4231(capture)");
+	chip->c_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
+	chip->c_dma.ebus_info.callback = snd_cs4231_ebus_capture_callback;
+	chip->c_dma.ebus_info.client_cookie = chip;
+	chip->c_dma.ebus_info.irq = edev->irqs[0];
+	strcpy(chip->p_dma.ebus_info.name, "cs4231(play)");
+	chip->p_dma.ebus_info.flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
+	chip->p_dma.ebus_info.callback = snd_cs4231_ebus_play_callback;
+	chip->p_dma.ebus_info.client_cookie = chip;
+	chip->p_dma.ebus_info.irq = edev->irqs[1];
+
+	chip->p_dma.prepare = _ebus_dma_prepare;
+	chip->p_dma.enable = _ebus_dma_enable;
+	chip->p_dma.request = _ebus_dma_request;
+	chip->p_dma.address = _ebus_dma_addr;
+	chip->p_dma.reset = _ebus_dma_reset;
+	chip->p_dma.preallocate = _ebus_dma_preallocate;
+
+	chip->c_dma.prepare = _ebus_dma_prepare;
+	chip->c_dma.enable = _ebus_dma_enable;
+	chip->c_dma.request = _ebus_dma_request;
+	chip->c_dma.address = _ebus_dma_addr;
+	chip->c_dma.reset = _ebus_dma_reset;
+	chip->c_dma.preallocate = _ebus_dma_preallocate;
 
 	chip->port = ioremap(edev->resource[0].start, 0x10);
-	chip->eb2p.regs = ioremap(edev->resource[1].start, 0x10);
-	chip->eb2c.regs = ioremap(edev->resource[2].start, 0x10);
-	if (!chip->port || !chip->eb2p.regs || !chip->eb2c.regs) {
+	chip->p_dma.ebus_info.regs = ioremap(edev->resource[1].start, 0x10);
+	chip->c_dma.ebus_info.regs = ioremap(edev->resource[2].start, 0x10);
+	if (!chip->port || !chip->p_dma.ebus_info.regs || !chip->c_dma.ebus_info.regs) {
 		snd_cs4231_ebus_free(chip);
 		snd_printdd("cs4231-%d: Unable to map chip registers.\n", dev);
 		return -EIO;
 	}
 
-	if (ebus_dma_register(&chip->eb2c)) {
+	if (ebus_dma_register(&chip->c_dma.ebus_info)) {
 		snd_cs4231_ebus_free(chip);
 		snd_printdd("cs4231-%d: Unable to register EBUS capture DMA\n", dev);
 		return -EBUSY;
 	}
-	if (ebus_dma_irq_enable(&chip->eb2c, 1)) {
+	if (ebus_dma_irq_enable(&chip->c_dma.ebus_info, 1)) {
 		snd_cs4231_ebus_free(chip);
 		snd_printdd("cs4231-%d: Unable to enable EBUS capture IRQ\n", dev);
 		return -EBUSY;
 	}
 
-	if (ebus_dma_register(&chip->eb2p)) {
+	if (ebus_dma_register(&chip->p_dma.ebus_info)) {
 		snd_cs4231_ebus_free(chip);
 		snd_printdd("cs4231-%d: Unable to register EBUS play DMA\n", dev);
 		return -EBUSY;
 	}
-	if (ebus_dma_irq_enable(&chip->eb2p, 1)) {
+	if (ebus_dma_irq_enable(&chip->p_dma.ebus_info, 1)) {
 		snd_cs4231_ebus_free(chip);
 		snd_printdd("cs4231-%d: Unable to enable EBUS play IRQ\n", dev);
 		return -EBUSY;