Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  drivers/ata/pata_octeon_cf.c: delete double assignment
  pata_legacy: fix CONFIG_PATA_WINBOND_VLB_MODULE test
  libata: fix NULL sdev dereference race in atapi_qc_complete()
diff --git a/MAINTAINERS b/MAINTAINERS
index 0094224..88b74a7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
 
@@ -5676,7 +5676,7 @@
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/
@@ -5910,7 +5910,7 @@
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/char/tty_*
 F:	drivers/serial/serial_core.c
 F:	include/linux/serial_core.h
@@ -6233,7 +6233,7 @@
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:	Supported
 F:	Documentation/usb/
 F:	drivers/net/usb/
@@ -6598,14 +6598,14 @@
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6613,7 +6613,7 @@
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18..fc81912 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@
 	if (!size)
 		return;
 
-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
 	if (!paddr) {
 		pr_err("%s: failed to reserve %x bytes\n",
 				__func__, size);
 		return;
 	}
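+	/*
+	 * memblock_alloc() above only picks a suitable physical address;
+	 * freeing the reservation and then removing the range hands it to
+	 * the DSP mempool while hiding it from the kernel entirely.
+	 */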
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
 
 	omap_dsp_phys_mempool_base = paddr;
 }
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8..d7b5109 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@
 		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
 			(type == PCI_CAP_ID_MSIX) ?
 			"pcifront-msi-x" : "pcifront-msi");
-		if (irq < 0)
-			return -1;
+		if (irq < 0) {
+			ret = -1;
+			goto free;
+		}
 
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
@@ -164,7 +166,7 @@
 	if (ret == -ENODEV)
 		dev_err(&dev->dev, "Xen PCI frontend has not registered" \
 			" MSI/MSI-X support!\n");
-
+free:
 	kfree(v);
 	return ret;
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b81..21ed8d7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa..769c4b0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@
 						     const struct e820map *e820)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = 0;
+	phys_addr_t last_end = ISA_END_ADDRESS;
 	unsigned long released = 0;
 	int i;
 
+	/* Free any unused memory above the low 1Mbyte. */
 	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
 		phys_addr_t end = e820->map[i].addr;
 		end = min(max_addr, end);
 
-		released += xen_release_chunk(last_end, end);
-		last_end = e820->map[i].addr + e820->map[i].size;
+		if (last_end < end)
+			released += xen_release_chunk(last_end, end);
+		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
 	}
 
 	if (last_end < max_addr)
@@ -164,6 +166,7 @@
 		XENMEM_memory_map;
 	rc = HYPERVISOR_memory_op(op, &memmap);
 	if (rc == -ENOSYS) {
+		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
 		map[0].addr = 0ULL;
 		map[0].size = mem_end;
@@ -201,12 +204,13 @@
 	}
 
 	/*
-	 * Even though this is normal, usable memory under Xen, reserve
-	 * ISA memory anyway because too many things think they can poke
+	 * In domU, the ISA region is normal, usable memory, but we
+	 * reserve ISA memory anyway because too many things poke
 	 * about in there.
 	 *
-	 * In a dom0 kernel, this region is identity mapped with the
-	 * hardware ISA area, so it really is out of bounds.
+	 * In Dom0, the host E820 information can leave gaps in the
+	 * ISA range, which would cause us to release those pages.  To
+	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a7046..c0bd6f4 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@
 {
 	struct async_struct * info = tty->driver_data;
 	struct async_icount cprev, cnow;	/* kernel counter temps */
-	struct serial_icounter_struct icount;
 	void __user *argp = (void __user *)arg;
 	unsigned long flags;
 
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1..294d03e 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@
 		      unsigned int cmd, unsigned long arg)
 {
 	struct port *port = tty->driver_data;
-	void __user *argp = (void __user *)arg;
 	int rval = -ENOIOCTLCMD;
 
 	DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f8..eaa4199 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@
 	.hangup = mgslpc_hangup,
 	.tiocmget = tiocmget,
 	.tiocmset = tiocmset,
+	.get_icount = mgslpc_get_icount,
 	.proc_fops = &mgslpc_proc_fops,
 };
 
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c498..3a5a6fc 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <xen/xenbus.h>
 #include <xen/interface/io/pciif.h>
 #include <asm/xen/pci.h>
 #include <linux/interrupt.h>
@@ -576,8 +575,9 @@
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
-		dev_err(&pcidev->dev,
-			"device or driver is NULL\n");
+		dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+		if (pcidev)
+			pci_dev_put(pcidev);
 		return result;
 	}
 	pdrv = pcidev->driver;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b..dd5e1ac 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2343,8 +2343,11 @@
 
 	/*
 	 * CTS flow control flag and modem status interrupts
+	 * Only disable MSI if no threads are waiting in
+	 * serial_core::uart_wait_modem_status
 	 */
-	up->ier &= ~UART_IER_MSI;
+	if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
+		up->ier &= ~UART_IER_MSI;
 	if (!(up->bugs & UART_BUG_NOMSR) &&
 			UART_ENABLE_MS(&up->port, termios->c_cflag))
 		up->ier |= UART_IER_MSI;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d3..842e3b2 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@
 
 static const struct pci_device_id softmodem_blacklist[] = {
 	{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
+	{ PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
+	{ PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
 };
 
 /*
@@ -2863,6 +2865,9 @@
 		PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
 		0, 0,
 		pbn_b0_4_1152000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0x9505,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_bt_2_921600 },
 
 		/*
 		 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b..19cac9f 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
+#include <linux/dma-mapping.h>
 
 #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
 	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
 #include <asm/gpio.h>
 #include <mach/bfin_serial_5xx.h>
 
-#ifdef CONFIG_SERIAL_BFIN_DMA
-#include <linux/dma-mapping.h>
+#include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
-#endif
 
 #ifdef CONFIG_SERIAL_BFIN_MODULE
 # undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@
 		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		uart->port.icount.tx++;
-		SSYNC();
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@
 
 # ifdef CONFIG_BF54x
 	{
+		/*
+		 * UART2 and UART3 on BF548 share interrupt pins and DMA
+		 * controllers with SPORT2 and SPORT3. In PIO mode, UART rx
+		 * and tx interrupts are generated only when the peripheral
+		 * mapping registers are configured properly, which means the
+		 * corresponding DMA channels must be requested in PIO mode
+		 * as well.
+		 */
 		unsigned uart_dma_ch_rx, uart_dma_ch_tx;
 
 		switch (uart->port.irq) {
@@ -734,8 +739,7 @@
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
 			IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
 			uart->cts_pin = -1;
-			pr_info("Unable to attach BlackFin UART CTS interrupt.\
-				 So, disable it.\n");
+			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
 		}
 	}
 	if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@
 	if (request_irq(uart->status_irq,
 		bfin_serial_mctrl_cts_int,
 		IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
-		pr_info("Unable to attach BlackFin UART Modem \
-			Status interrupt.\n");
+		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
 	}
 
 	/* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@
 	if (termios->c_cflag & CMSPAR)
 		lcr |= STP;
 
+	spin_lock_irqsave(&uart->port.lock, flags);
+
 	port->read_status_mask = OE;
 	if (termios->c_iflag & INPCK)
 		port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@
 	if (termios->c_line != N_IRDA)
 		quot -= ANOMALY_05000230;
 
-	spin_lock_irqsave(&uart->port.lock, flags);
-
 	UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
 
 	/* Disable UART */
@@ -1321,6 +1324,14 @@
 	struct bfin_serial_port *uart;
 	struct ktermios t;
 
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	/*
+	 * If we are using early serial, don't let the normal console rewind
+	 * log buffer, since that causes things to be printed multiple times
+	 */
+	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
 	if (port == -1 || port >= nr_active_ports)
 		port = 0;
 	bfin_serial_init_ports();
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf4..8a5caa3 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@
 
 config ATH6KL_CFG80211
 	bool "CFG80211 support"
-	depends on ATH6K_LEGACY
+	depends on ATH6K_LEGACY && CFG80211
 	help
 	Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
 
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c..a659f70 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@
         if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
             A_UINT32 param;
 
-            status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size);
+            status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
 
             if (status != A_OK) {
                 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@
         A_UINT8 csumDest=0;
         A_UINT8 csum=skb->ip_summed;
         if(csumOffload && (csum==CHECKSUM_PARTIAL)){
-            csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR);
+            csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
+			 sizeof(ATH_LLC_SNAP_HDR));
             csumDest=skb->csum_offset+csumStart;
         }
 #endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29..7269d0a 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@
 
 static int
 ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
                       struct key_params *params)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@
 
 static int
 ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr)
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
 
@@ -936,7 +936,8 @@
 
 static int
 ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
+                      void *cookie,
                       void (*callback)(void *cookie, struct key_params*))
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa86..b68a7e5 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@
 	       batman_if->net_dev->dev_addr, ETH_ALEN);
 }
 
-static void check_known_mac_addr(uint8_t *addr)
+static void check_known_mac_addr(struct net_device *net_dev)
 {
 	struct batman_if *batman_if;
 
@@ -175,11 +175,16 @@
 		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr, addr))
+		if (batman_if->net_dev == net_dev)
+			continue;
+
+		if (!compare_orig(batman_if->net_dev->dev_addr,
+				  net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
-			   "on: %s\n", addr, batman_if->net_dev->name);
+			   "on: %s\n", net_dev->dev_addr,
+			   batman_if->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -430,7 +435,7 @@
 	atomic_set(&batman_if->refcnt, 0);
 	hardif_hold(batman_if);
 
-	check_known_mac_addr(batman_if->net_dev->dev_addr);
+	check_known_mac_addr(batman_if->net_dev);
 
 	spin_lock(&if_list_lock);
 	list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@
 			goto out;
 		}
 
-		check_known_mac_addr(batman_if->net_dev->dev_addr);
+		check_known_mac_addr(batman_if->net_dev);
 		update_mac_addresses(batman_if);
 
 		bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 9010263..657b69e 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@
 
 /* find a suitable router for this originator, and use
  * bonding if possible. */
-struct neigh_node *find_router(struct orig_node *orig_node,
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+			       struct orig_node *orig_node,
 			       struct batman_if *recv_if)
 {
-	struct bat_priv *bat_priv;
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
 	struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@
 	/* without bonding, the first node should
 	 * always choose the default router. */
 
-	if (!recv_if)
-		return orig_node->router;
-
-	bat_priv = netdev_priv(recv_if->soft_iface);
 	bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
 
-	if (!bonding_enabled)
+	if ((!recv_if) && (!bonding_enabled))
 		return orig_node->router;
 
 	router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@
 	orig_node = ((struct orig_node *)
 		     hash_find(bat_priv->orig_hash, unicast_packet->dest));
 
-	router = find_router(orig_node, recv_if);
+	router = find_router(bat_priv, orig_node, recv_if);
 
 	if (!router) {
 		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99d..92674c8 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@
 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
-struct neigh_node *find_router(struct orig_node *orig_node,
-		struct batman_if *recv_if);
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+		struct orig_node *orig_node, struct batman_if *recv_if);
 void update_bonding_candidates(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node);
 
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d..0459413 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@
 	if (!orig_node)
 		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
-	router = find_router(orig_node, NULL);
+	router = find_router(bat_priv, orig_node, NULL);
 
 	if (!router)
 		goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe2..fead9c5 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@
 		}
 #endif
 		case IOCTL_BE_BUCKET_SIZE:
-			Adapter->BEBucketSize = *(PULONG)arg;
-			Status = STATUS_SUCCESS;
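+			/*
+			 * arg is a user-space pointer; fetch the value with
+			 * get_user() rather than dereferencing it directly.
+			 */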
+			Status = 0;
+			if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+				Status = -EFAULT;
 			break;
 
 		case IOCTL_RTPS_BUCKET_SIZE:
-			Adapter->rtPSBucketSize = *(PULONG)arg;
-			Status = STATUS_SUCCESS;
+			Status = 0;
+			if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+				Status = -EFAULT;
 			break;
 		case IOCTL_CHIP_RESET:
 	    {
@@ -1028,11 +1030,15 @@
 		case IOCTL_QOS_THRESHOLD:
 		{
 			USHORT uiLoopIndex;
-			for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++)
-			{
-				Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg;
+
+			Status = 0;
+			for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+				if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+						(unsigned long __user *)arg)) {
+					Status = -EFAULT;
+					break;
+				}
 			}
-			Status = STATUS_SUCCESS;
 			break;
 		}
 
@@ -1093,7 +1099,8 @@
 		}
 		case IOCTL_BCM_GET_CURRENT_STATUS:
 		{
-			LINK_STATE *plink_state = NULL;
+			LINK_STATE plink_state;
+
 			/* Copy Ioctl Buffer structure */
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 			{
@@ -1101,13 +1108,19 @@
 				Status = -EFAULT;
 				break;
 			}
-			plink_state = (LINK_STATE*)arg;
-			plink_state->bIdleMode = (UCHAR)Adapter->IdleMode;
-			plink_state->bShutdownMode = Adapter->bShutStatus;
-			plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus;
-			if(copy_to_user(IoBuffer.OutputBuffer,
-				(PUCHAR)plink_state, (UINT)IoBuffer.OutputLength))
-			{
+			if (IoBuffer.OutputLength != sizeof(plink_state)) {
+				Status = -EINVAL;
+				break;
+			}
+
+			if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
+				Status = -EFAULT;
+				break;
+			}
+			plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
+			plink_state.bShutdownMode = Adapter->bShutStatus;
+			plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
+			if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
 				Status = -EFAULT;
 				break;
@@ -1331,7 +1344,9 @@
 						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
 						return -EFAULT;
 					}
-					uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */
+					if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
+						return -EFAULT;
+
 					if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
 					{
 
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb..c8f1cf1 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@
 =============
 Brett Rudley	brudley@broadcom.com
 Henry Ptasinski henryp@broadcom.com
-Nohee Ko	noheek@broadcom.com
+Dowan Kim	dowan@broadcom.com
 
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d30..dbf9041 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@
 =====
 Brett Rudley <brudley@broadcom.com>
 Henry Ptasinski <henryp@broadcom.com>
-Nohee Ko <noheek@broadcom.com>
+Dowan Kim <dowan@broadcom.com>
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5..9335f02 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@
 	ASSERT(net);
 
 	ASSERT(!net->netdev_ops);
-	net->netdev_ops = &dhd_ops_virt;
-
 	net->netdev_ops = &dhd_ops_pri;
 
 	/*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488..ea08252 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@
 					    struct net_device *dev,
 					    u8 key_idx);
 static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr,
+				 u8 key_idx, bool pairwise, const u8 *mac_addr,
 				 struct key_params *params);
 static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr);
+				 u8 key_idx, bool pairwise, const u8 *mac_addr);
 static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr,
+				 u8 key_idx, bool pairwise, const u8 *mac_addr,
 				 void *cookie, void (*callback) (void *cookie,
 								 struct
 								 key_params *
@@ -1615,7 +1615,7 @@
 
 static s32
 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr,
 		    struct key_params *params)
 {
 	struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@
 
 static s32
 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr)
+		    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
 	struct wl_wsec_key key;
 	s32 err = 0;
@@ -1756,7 +1756,7 @@
 
 static s32
 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr, void *cookie,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
 		    void (*callback) (void *cookie, struct key_params * params))
 {
 	struct key_params params;
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c..0e740b8 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@
 		goto oops;
 	}
 
-	err = -EINTR;
-	if(signal_pending(current))
-		goto oops;
-
 	/* Set ownership of /proc/cpia/videoX to current user */
 	if(cam->proc_entry)
-		cam->proc_entry->uid = current_uid();
+		cam->proc_entry->uid = current_euid();
 
 	/* set mark for loading first frame uncompressed */
 	cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487..20d5098 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@
     pid = kernel_thread (exec_mknod, (void *)info, 0);
 
     // initialize application information
-    info->appcnt = 0;
 
 //    if (ft1000_flarion_cnt == 0) {
 //
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478..a99e900 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@
 			   recvlen, requestid);
 
 		icmsghdrp = (struct icmsg_hdr *)&buf[
-			sizeof(struct vmbuspipe_hdr)];
-
-		icmsghdrp = (struct icmsg_hdr *)&buf[
 				sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cb..9618c79 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@
 	int retval, i;
 	struct stream_info *stream;
 	struct snd_sst_mmap_buff_entry *buf_entry;
+	struct snd_sst_mmap_buff_entry *tmp_buf;
 
 	pr_debug("sst:called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
-	BUG_ON(!mmap_buf);
 
 	stream = &sst_drv_ctx->streams[str_id];
 	if (stream->mmapped != true)
@@ -262,14 +262,24 @@
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 
+	tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
+	if (!tmp_buf)
+		return -ENOMEM;
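+	/*
+	 * mmap_buf->buff is a user-space pointer; snapshot the entry
+	 * table into kernel memory before walking it below.
+	 */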
+	if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
+			mmap_buf->entries * sizeof(*tmp_buf))) {
+		retval = -EFAULT;
+		goto out_free;
+	}
+
 	pr_debug("sst:new buffers count %d status %d\n",
 			mmap_buf->entries, stream->status);
-	buf_entry = mmap_buf->buff;
+	buf_entry = tmp_buf;
 	for (i = 0; i < mmap_buf->entries; i++) {
-		BUG_ON(!buf_entry);
 		bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
-		if (!bufs)
-			return -ENOMEM;
+		if (!bufs) {
+			retval = -ENOMEM;
+			goto out_free;
+		}
 		bufs->size = buf_entry->size;
 		bufs->offset = buf_entry->offset;
 		bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@
 			if (sst_play_frame(str_id) < 0) {
 				pr_warn("sst: play frames fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
 				pr_warn("sst: capture frame fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		}
 	}
@@ -314,6 +326,9 @@
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
 	pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+
+out_free:
+	kfree(tmp_buf);
 	return retval;
 }
 
@@ -377,7 +392,7 @@
 {
 	struct sst_stream_bufs *stream_bufs;
 	unsigned long index, mmap_len;
-	unsigned char *bufp;
+	unsigned char __user *bufp;
 	unsigned long size, copied_size;
 	int retval = 0, add_to_list = 0;
 	static int sent_offset;
@@ -512,9 +527,7 @@
 			/* copy to user */
 			list_for_each_entry_safe(entry, _entry,
 						copy_to_list, node) {
-				if (copy_to_user((void *)
-					     iovec[entry->iov_index].iov_base +
-					     entry->iov_offset,
+				if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
 					     kbufs->addr + entry->offset,
 					     entry->size)) {
 					/* Clean up the list and return error */
@@ -590,7 +603,7 @@
 			buf, (int) count, (int) stream->status);
 
 	stream->buf_type = SST_BUF_USER_STATIC;
-	iovec.iov_base = (void *)buf;
+	iovec.iov_base = buf;
 	iovec.iov_len  = count;
 	nr_segs = 1;
 
@@ -838,7 +851,7 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
-		struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
+		struct snd_sst_params str_param;
 
 		pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@
 			break;
 		}
 
+		if (copy_from_user(&str_param, (void __user *)arg,
+				sizeof(str_param))) {
+			retval = -EFAULT;
+			break;
+		}
+
 		if (!str_id) {
 
-			retval = sst_get_stream(str_param);
+			retval = sst_get_stream(&str_param);
 			if (retval > 0) {
 				struct stream_info *str_info;
+				char __user *dest;
+
 				sst_drv_ctx->stream_cnt++;
 				data->str_id = retval;
 				str_info = &sst_drv_ctx->streams[retval];
 				str_info->src = SST_DRV;
-				retval = copy_to_user(&str_param->stream_id,
-						&retval, sizeof(__u32));
+				dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
+				retval = copy_to_user(dest, &retval, sizeof(__u32));
 				if (retval)
 					retval = -EFAULT;
 			} else {
@@ -866,16 +887,14 @@
 		} else {
 			pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
 			/* allocated set params only */
-			retval = sst_set_stream_param(str_id, str_param);
+			retval = sst_set_stream_param(str_id, &str_param);
 			/* Block the call for reply */
 			if (!retval) {
 				int sfreq = 0, word_size = 0, num_channel = 0;
-				sfreq =	str_param->sparams.uc.pcm_params.sfreq;
-				word_size = str_param->sparams.
-						uc.pcm_params.pcm_wd_sz;
-				num_channel = str_param->
-					sparams.uc.pcm_params.num_chan;
-				if (str_param->ops == STREAM_OPS_CAPTURE) {
+				sfreq =	str_param.sparams.uc.pcm_params.sfreq;
+				word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
+				num_channel = str_param.sparams.uc.pcm_params.num_chan;
+				if (str_param.ops == STREAM_OPS_CAPTURE) {
 					sst_drv_ctx->scard_ops->\
 					set_pcm_audio_params(sfreq,
 						word_size, num_channel);
@@ -885,41 +904,39 @@
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_SET_VOL): {
-		struct snd_sst_vol *set_vol;
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SET_VOLUME recieved for %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
-			pr_debug("sst: invalid operation!\n");
-			retval = -EPERM;
-			break;
-		}
-		set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
-		if (!set_vol) {
-			pr_debug("sst: mem allocation failed\n");
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
+		struct snd_sst_vol set_vol;
+
+		if (copy_from_user(&set_vol, (void __user *)arg,
+				sizeof(set_vol))) {
 			pr_debug("sst: copy failed\n");
 			retval = -EFAULT;
 			break;
 		}
-		retval = sst_set_vol(set_vol);
-		kfree(set_vol);
-		break;
-	}
-	case _IOC_NR(SNDRV_SST_GET_VOL): {
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
-		struct snd_sst_vol get_vol;
-		pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+		pr_debug("sst: SET_VOLUME received for %d!\n",
+				set_vol.stream_id);
+		if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		get_vol.stream_id = rec_vol->stream_id;
+		retval = sst_set_vol(&set_vol);
+		break;
+	}
+	case _IOC_NR(SNDRV_SST_GET_VOL): {
+		struct snd_sst_vol get_vol;
+
+		if (copy_from_user(&get_vol, (void __user *)arg,
+				sizeof(get_vol))) {
+			retval = -EFAULT;
+			break;
+		}
+		pr_debug("sst: IOCTL_GET_VOLUME received for stream = %d!\n",
+				get_vol.stream_id);
+		if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
+			pr_debug("sst: invalid operation!\n");
+			retval = -EPERM;
+			break;
+		}
 		retval = sst_get_vol(&get_vol);
 		if (retval) {
 			retval = -EIO;
@@ -928,7 +945,7 @@
 		pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
 				get_vol.stream_id, get_vol.volume,
 				get_vol.ramp_duration, get_vol.ramp_type);
-		if (copy_to_user((struct snd_sst_vol *)arg,
+		if (copy_to_user((struct snd_sst_vol __user *)arg,
 				&get_vol, sizeof(get_vol))) {
 			retval = -EFAULT;
 			break;
@@ -938,25 +955,20 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_MUTE): {
-		struct snd_sst_mute *set_mute;
-		struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
-			rec_mute->stream_id);
-		if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
-			retval = -EPERM;
-			break;
-		}
-		set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
-		if (!set_mute) {
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) {
+		struct snd_sst_mute set_mute;
+
+		if (copy_from_user(&set_mute, (void __user *)arg,
+				sizeof(set_mute))) {
 			retval = -EFAULT;
 			break;
 		}
-		retval = sst_set_mute(set_mute);
-		kfree(set_mute);
+		pr_debug("sst: SNDRV_SST_MUTE received for %d!\n",
+			set_mute.stream_id);
+		if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
+			retval = -EPERM;
+			break;
+		}
+		retval = sst_set_mute(&set_mute);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@
 			retval = -EIO;
 			break;
 		}
-		if (copy_to_user((struct snd_sst_get_stream_params *)arg,
+		if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
 					&get_params, sizeof(get_params))) {
 			retval = -EFAULT;
 			break;
@@ -983,16 +995,22 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_MMAP_PLAY):
-	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE):
+	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
+		struct snd_sst_mmap_buffs mmap_buf;
+
 		pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = intel_sst_mmap_play_capture(str_id,
-				(struct snd_sst_mmap_buffs *)arg);
+		if (copy_from_user(&mmap_buf, (void __user *)arg,
+				sizeof(mmap_buf))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
 		break;
-
+	}
 	case _IOC_NR(SNDRV_SST_STREAM_DROP):
 		pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
-		unsigned long long *ms = (unsigned long long *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 		unsigned long long time, freq, mod;
 
@@ -1013,14 +1030,14 @@
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-			+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		time = tstamp.samples_rendered;
 		freq = (unsigned long long) tstamp.sampling_frequency;
 		time = time * 1000; /* converting it to ms */
 		mod = do_div(time, freq);
-		if (copy_to_user(ms, &time, sizeof(*ms)))
+		if (copy_to_user((void __user *)arg, &time,
+				sizeof(unsigned long long)))
 			retval = -EFAULT;
 		break;
 	}
@@ -1065,92 +1082,118 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
-		struct snd_sst_target_device *target_device;
+		struct snd_sst_target_device target_device;
 
 		pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
-		target_device =	(struct snd_sst_target_device *)arg;
-		BUG_ON(!target_device);
+		if (copy_from_user(&target_device, (void __user *)arg,
+				sizeof(target_device))) {
+			retval = -EFAULT;
+			break;
+		}
 		if (minor != AM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = sst_target_device_select(target_device);
+		retval = sst_target_device_select(&target_device);
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
-		struct snd_sst_driver_info *info =
-			(struct snd_sst_driver_info *)arg;
+		struct snd_sst_driver_info info;
 
 		pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
-		info->version = SST_VERSION_NUM;
+		info.version = SST_VERSION_NUM;
 		/* hard coding, shud get sumhow later */
-		info->active_pcm_streams = sst_drv_ctx->stream_cnt -
+		info.active_pcm_streams = sst_drv_ctx->stream_cnt -
 						sst_drv_ctx->encoded_cnt;
-		info->active_enc_streams = sst_drv_ctx->encoded_cnt;
-		info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
-		info->max_enc_streams = MAX_ENC_STREAM;
-		info->buf_per_stream = sst_drv_ctx->mmap_len;
+		info.active_enc_streams = sst_drv_ctx->encoded_cnt;
+		info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
+		info.max_enc_streams = MAX_ENC_STREAM;
+		info.buf_per_stream = sst_drv_ctx->mmap_len;
+		if (copy_to_user((void __user *)arg, &info,
+				sizeof(info)))
+			retval = -EFAULT;
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
-		struct snd_sst_dbufs *param =
-				(struct snd_sst_dbufs *)arg, dbufs_local;
-		int i;
+		struct snd_sst_dbufs param;
+		struct snd_sst_dbufs dbufs_local;
 		struct snd_sst_buffs ibufs, obufs;
-		struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries],
-				obuf_temp[param->obufs->entries];
+		struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
+		char __user *dest;
 
 		pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		if (!param) {
-			retval = -EINVAL;
+		if (copy_from_user(&param, (void __user *)arg,
+				sizeof(param))) {
+			retval = -EFAULT;
 			break;
 		}
 
-		dbufs_local.input_bytes_consumed = param->input_bytes_consumed;
+		dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
 		dbufs_local.output_bytes_produced =
-					param->output_bytes_produced;
-		dbufs_local.ibufs = &ibufs;
-		dbufs_local.obufs = &obufs;
-		dbufs_local.ibufs->entries = param->ibufs->entries;
-		dbufs_local.ibufs->type = param->ibufs->type;
-		dbufs_local.obufs->entries = param->obufs->entries;
-		dbufs_local.obufs->type = param->obufs->type;
+					param.output_bytes_produced;
 
-		dbufs_local.ibufs->buff_entry = ibuf_temp;
-		for (i = 0; i < dbufs_local.ibufs->entries; i++) {
-			ibuf_temp[i].buffer =
-				param->ibufs->buff_entry[i].buffer;
-			ibuf_temp[i].size =
-				param->ibufs->buff_entry[i].size;
+		if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
+			retval = -EFAULT;
+			break;
 		}
-		dbufs_local.obufs->buff_entry = obuf_temp;
-		for (i = 0; i < dbufs_local.obufs->entries; i++) {
-			obuf_temp[i].buffer =
-				param->obufs->buff_entry[i].buffer;
-			obuf_temp[i].size =
-				param->obufs->buff_entry[i].size;
+		if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
+			retval = -EFAULT;
+			break;
 		}
+
+		ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
+		obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
+		if (!ibuf_tmp || !obuf_tmp) {
+			retval = -ENOMEM;
+			goto free_iobufs;
+		}
+
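+		/*
+		 * ibufs/obufs carry user-space entry pointers; pull both
+		 * tables into kernel buffers before handing them to
+		 * sst_decode().
+		 */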
+		if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
+				ibufs.entries * sizeof(*ibuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
+		}
+		ibufs.buff_entry = ibuf_tmp;
+		dbufs_local.ibufs = &ibufs;
+
+		if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
+				obufs.entries * sizeof(*obuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
+		}
+		obufs.buff_entry = obuf_tmp;
+		dbufs_local.obufs = &obufs;
+
 		retval = sst_decode(str_id, &dbufs_local);
-		if (retval)
-			retval =  -EAGAIN;
-		if (copy_to_user(&param->input_bytes_consumed,
+		if (retval) {
+			retval = -EAGAIN;
+			goto free_iobufs;
+		}
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
+		if (copy_to_user(dest,
 				&dbufs_local.input_bytes_consumed,
 				sizeof(unsigned long long))) {
-			retval =  -EFAULT;
-			break;
+			retval = -EFAULT;
+			goto free_iobufs;
 		}
-		if (copy_to_user(&param->output_bytes_produced,
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
+		if (copy_to_user(dest,
 				&dbufs_local.output_bytes_produced,
 				sizeof(unsigned long long))) {
-			retval =  -EFAULT;
-			break;
+			retval = -EFAULT;
+			goto free_iobufs;
 		}
+free_iobufs:
+		kfree(ibuf_tmp);
+		kfree(obuf_tmp);
 		break;
 	}
 
@@ -1164,7 +1207,7 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
-		unsigned long long *bytes = (unsigned long long *)arg;
+		unsigned long long __user *bytes = (unsigned long long __user *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 
 		pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-			+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		if (copy_to_user(bytes, &tstamp.bytes_processed,
 				sizeof(*bytes)))
@@ -1197,7 +1239,7 @@
 			kfree(fw_info);
 			break;
 		}
-		if (copy_to_user((struct snd_sst_dbufs *)arg,
+		if (copy_to_user((struct snd_sst_dbufs __user *)arg,
 				fw_info, sizeof(*fw_info))) {
 			kfree(fw_info);
 			retval = -EFAULT;
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c8..bf0ead7 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@
 	spinlock_t          pcm_lock;
 	bool			mmapped;
 	unsigned int		sg_index; /*  current buf Index  */
-	unsigned char		*cur_ptr; /*  Current static bufs  */
-	struct snd_sst_buf_entry *buf_entry;
+	unsigned char __user	*cur_ptr; /*  Current static bufs  */
+	struct snd_sst_buf_entry __user *buf_entry;
 	struct sst_block	data_blk; /* stream ops block */
 	struct sst_block	ctrl_blk; /* stream control cmd block */
 	enum snd_sst_buf_type   buf_type;
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805..978bf87 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@
 	int	result;
 	BYTE	MiscReg03 = 0;
 
-	printk("--- Initial Nedia ---\n");
+	printk("--- Init Media ---\n");
 	result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
 	if (result != USB_STOR_XFER_GOOD)
 	{
@@ -64,7 +64,7 @@
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
 
-	memset(bcb, 0, sizeof(bcb));
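+	/* sizeof(bcb) is the size of the pointer; zero the whole wrapper struct. */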
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x01;
 	bcb->Flags			= 0x80;
@@ -92,7 +92,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->Flags = 0x80;
 	bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -161,7 +161,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -219,7 +219,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -341,7 +341,7 @@
 		break;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x800;
 	bcb->Flags =0x00;
@@ -433,7 +433,7 @@
 
 	//printk("transport --- ENE_Read_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x80;
@@ -470,7 +470,7 @@
 
 	//printk("transport --- ENE_Write_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x00;
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9..9a3fdb4 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200*len;
 	bcb->Flags			= 0x00;
@@ -53,7 +53,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Page Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags			= 0x80;
@@ -69,7 +69,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
@@ -108,7 +108,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags			= 0x80;
@@ -673,7 +673,7 @@
 	//printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4 * blen;
 	bcb->Flags			= 0x80;
@@ -700,7 +700,7 @@
 	BYTE	ExtBuf[4];
 
 	//printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
@@ -807,7 +807,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c6..cb92d25 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@
 		}
 
 		// set up the command wrapper
-		memset(bcb, 0, sizeof(bcb));
+		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 		bcb->DataTransferLength = blenByte;
 		bcb->Flags  = 0x80;
@@ -193,7 +193,7 @@
 			blkno  = phyblk * 0x20 + PageNum;
 
 			// set up the command wrapper
-			memset(bcb, 0, sizeof(bcb));
+			memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 			bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 			bcb->DataTransferLength = 0x200 * len;
 			bcb->Flags  = 0x80;
@@ -250,7 +250,7 @@
 		}
 
 		// set up the command wrapper
-		memset(bcb, 0, sizeof(bcb));
+		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 		bcb->DataTransferLength = blenByte;
 		bcb->Flags  = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f8..d646507 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@
 		bnByte = bn;
 		
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags  = 0x80;
@@ -192,7 +192,7 @@
 		bnByte = bn;
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags  = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b659..1b52535 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -281,7 +281,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -319,7 +319,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200*count;
 	bcb->Flags			= 0x80;
@@ -334,7 +334,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -536,7 +536,7 @@
 	WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200*count;
 	bcb->Flags			= 0x00;
@@ -754,7 +754,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x00;
@@ -791,7 +791,7 @@
 	addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr=addr*(WORD)Ssfdc.MaxSectors;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -827,7 +827,7 @@
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -870,7 +870,7 @@
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df6..111160c 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@
 	us->current_urb->error_count = 0;
 	us->current_urb->status = 0;
 
-//	us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
+	us->current_urb->transfer_flags = 0;
 	if (us->current_urb->transfer_buffer == us->iobuf)
 		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	us->current_urb->transfer_dma = us->iobuf_dma;
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff..a99879b 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@
 	for (i = 8; i < 14; i++)
 		mic_iv[i] = pn_vector[13 - i];	/* mic_iv[8:13] = PN[5:0] */
 #endif
-	i = (payload_length / 256);
-	i = (payload_length % 256);
 	mic_iv[14] = (unsigned char)(payload_length / 256);
 	mic_iv[15] = (unsigned char)(payload_length % 256);
 
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074..ddacfc6 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@
 	{USB_DEVICE(0x14B2, 0x3C07)},	/* AL */
 	{USB_DEVICE(0x050D, 0x8053)},	/* Belkin */
 	{USB_DEVICE(0x050D, 0x825B)},	/* Belkin */
+	{USB_DEVICE(0x050D, 0x935A)},	/* Belkin F6D4050 v1 */
 	{USB_DEVICE(0x050D, 0x935B)},	/* Belkin F6D4050 v2 */
 	{USB_DEVICE(0x14B2, 0x3C23)},	/* Airlink */
 	{USB_DEVICE(0x14B2, 0x3C27)},	/* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194..b1786dc 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@
                     }
                 }
 
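+		/*
+		 * Release the DMA mapping of the buffer just processed
+		 * before the skb pointer is replaced, or the mapping leaks.
+		 */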
+		pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
+			priv->rxbuffersize, PCI_DMA_FROMDEVICE);
+
                 skb = new_skb;
                 priv->rx_buf[priv->rx_idx] = skb;
                 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824..807dd7e 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@
 	case VIDIOCGCAP:
 		{
 			struct video_capability b;
+			memset(&b, 0, sizeof(b));
 			strcpy(b.name, saa->video_dev.name);
 			b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
 				VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@
 	case VIDIOCGWIN:
 		{
 			struct video_window vw;
+			memset(&vw, 0, sizeof(vw));
 			vw.x = saa->win.x;
 			vw.y = saa->win.y;
 			vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@
 	case VIDIOCGFBUF:
 		{
 			struct video_buffer v;
+			memset(&v, 0, sizeof(v));
 			v.base = (void *)saa->win.vidadr;
 			v.height = saa->win.sheight;
 			v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@
 	case VIDIOCGAUDIO:
 		{
 			struct video_audio v;
+			memset(&v, 0, sizeof(v));
 			v = saa->audio_dev;
 			v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
 			v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@
 	case VIDIOCGUNIT:
 		{
 			struct video_unit vu;
+			memset(&vu, 0, sizeof(vu));
 			vu.video = saa->video_dev.minor;
 			vu.vbi = VIDEO_NO_UNIT;
 			vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@
 
 	saa->user++;
 	if (saa->user > 1) {
+		saa->user--;
 		unlock_kernel();
 		return 0;	/* device open already, don't reset */
 	}
@@ -2000,10 +2006,13 @@
 	if (retval < 0) {
 		dev_err(&pdev->dev, "%d: error in registering video device!\n",
 			num);
-		goto errio;
+		goto errirq;
 	}
 
 	return 0;
+
+errirq:
+	free_irq(saa->irq, saa);
 errio:
 	iounmap(saa->saa7146_mem);
 err:
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d46..93de4f2 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@
 	tristate "DSP Bridge driver"
 	depends on ARCH_OMAP3
 	select OMAP_MBOX_FWK
-	select OMAP_IOMMU
 	help
 	  DSP/BIOS Bridge is designed for platforms that contain a GPP and
 	  one or more attached DSPs.  The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2..41c644c 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@
 
 libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
 libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
-		core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \
+		core/tiomap3430_pwr.o core/tiomap_io.o \
 		core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
 libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
-		pmgr/cmm.o pmgr/dbll.o
+		pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
 librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
 		rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
 		rmgr/nldr.o rmgr/drv_interface.o
 libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
 		 dynload/tramp.o
+libhw = hw/hw_mmu.o
 
 bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
-			$(libdload)
+			$(libdload) $(libhw)
 
 #Machine dependent
 ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae2633..16723cd 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
 struct deh_mgr {
 	struct bridge_dev_context *hbridge_context;	/* Bridge context. */
 	struct ntfy_object *ntfy_obj;	/* NTFY object */
-};
 
-int mmu_fault_isr(struct iommu *mmu);
+	/* MMU Fault DPC */
+	struct tasklet_struct dpc_tasklet;
+};
 
 #endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c..1c1f157 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
-#include <dspbridge/dsp-mmu.h>
 #include <dspbridge/devdefs.h>
+#include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
 #include <dspbridge/sync.h>
 #include <dspbridge/clk.h>
@@ -306,18 +306,6 @@
 
 #define CLEAR_BIT_INDEX(reg, index)   (reg &= ~(1 << (index)))
 
-struct shm_segs {
-	u32 seg0_da;
-	u32 seg0_pa;
-	u32 seg0_va;
-	u32 seg0_size;
-	u32 seg1_da;
-	u32 seg1_pa;
-	u32 seg1_va;
-	u32 seg1_size;
-};
-
-
 /* This Bridge driver's device context: */
 struct bridge_dev_context {
 	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
@@ -328,6 +316,7 @@
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;	/* API mem map'd registers */
+	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;	/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
 	u32 dw_public_rhea;	/* Pub Rhea */
@@ -339,8 +328,7 @@
 	u32 dw_internal_size;	/* Internal memory size */
 
 	struct omap_mbox *mbox;		/* Mail box handle */
-	struct iommu *dsp_mmu;      /* iommu for iva2 handler */
-	struct shm_segs sh_s;
+
 	struct cfg_hostres *resources;	/* Host Resources */
 
 	/*
@@ -353,6 +341,7 @@
 
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
 
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95a..0000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * dsp-mmu.c
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * DSP iommu.
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <dspbridge/host_os.h>
-#include <plat/dmtimer.h>
-#include <dspbridge/dbdefs.h>
-#include <dspbridge/dev.h>
-#include <dspbridge/io_sm.h>
-#include <dspbridge/dspdeh.h>
-#include "_tiomap.h"
-
-#include <dspbridge/dsp-mmu.h>
-
-#define MMU_CNTL_TWL_EN		(1 << 2)
-
-static struct tasklet_struct mmu_tasklet;
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
-{
-	void *dummy_addr;
-	u32 fa, tmp;
-	struct iotlb_entry e;
-	struct iommu *mmu = dev_context->dsp_mmu;
-	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
-
-	/*
-	 * Before acking the MMU fault, let's make sure MMU can only
-	 * access entry #0. Then add a new entry so that the DSP OS
-	 * can continue in order to dump the stack.
-	 */
-	tmp = iommu_read_reg(mmu, MMU_CNTL);
-	tmp &= ~MMU_CNTL_TWL_EN;
-	iommu_write_reg(mmu, tmp, MMU_CNTL);
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-	e.da = fa & PAGE_MASK;
-	e.pa = virt_to_phys(dummy_addr);
-	e.valid = 1;
-	e.prsvd = 1;
-	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
-	e.endian = MMU_RAM_ENDIAN_LITTLE;
-	e.elsz = MMU_RAM_ELSZ_32;
-	e.mixed = 0;
-
-	load_iotlb_entry(mmu, &e);
-
-	dsp_clk_enable(DSP_CLK_GPT8);
-
-	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
-
-	/* Clear MMU interrupt */
-	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
-	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
-
-	dump_dsp_stack(dev_context);
-	dsp_clk_disable(DSP_CLK_GPT8);
-
-	iopgtable_clear_entry(mmu, fa);
-	free_page((unsigned long)dummy_addr);
-}
-#endif
-
-
-static void fault_tasklet(unsigned long data)
-{
-	struct iommu *mmu = (struct iommu *)data;
-	struct bridge_dev_context *dev_ctx;
-	struct deh_mgr *dm;
-	u32 fa;
-	dev_get_deh_mgr(dev_get_first(), &dm);
-	dev_get_bridge_context(dev_get_first(), &dev_ctx);
-
-	if (!dm || !dev_ctx)
-		return;
-
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-	print_dsp_trace_buffer(dev_ctx);
-	dump_dl_modules(dev_ctx);
-	mmu_fault_print_stack(dev_ctx);
-#endif
-
-	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
-}
-
-/*
- *  ======== mmu_fault_isr ========
- *      ISR to be triggered by a DSP MMU fault interrupt.
- */
-static int mmu_fault_callback(struct iommu *mmu)
-{
-	if (!mmu)
-		return -EPERM;
-
-	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
-	tasklet_schedule(&mmu_tasklet);
-	return 0;
-}
-
-/**
- * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
- *
- * This function initialize dsp mmu module and returns a struct iommu
- * handle to use it for dsp maps.
- *
- */
-struct iommu *dsp_mmu_init()
-{
-	struct iommu *mmu;
-
-	mmu = iommu_get("iva2");
-
-	if (!IS_ERR(mmu)) {
-		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
-		mmu->isr = mmu_fault_callback;
-	}
-
-	return mmu;
-}
-
-/**
- * dsp_mmu_exit() - destroy dsp mmu module
- * @mmu:	Pointer to iommu handle.
- *
- * This function destroys dsp mmu module.
- *
- */
-void dsp_mmu_exit(struct iommu *mmu)
-{
-	if (mmu)
-		iommu_put(mmu);
-	tasklet_kill(&mmu_tasklet);
-}
-
-/**
- * user_va2_pa() - get physical address from userspace address.
- * @mm:		mm_struct Pointer of the process.
- * @address:	Virtual user space address.
- *
- */
-static u32 user_va2_pa(struct mm_struct *mm, u32 address)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	pgd = pgd_offset(mm, address);
-	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
-		pmd = pmd_offset(pgd, address);
-		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
-			ptep = pte_offset_map(pmd, address);
-			if (ptep) {
-				pte = *ptep;
-				if (pte_present(pte))
-					return pte & PAGE_MASK;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * get_io_pages() - pin and get pages of io user's buffer.
- * @mm:		mm_struct Pointer of the process.
- * @uva:		Virtual user space address.
- * @pages	Pages to be pined.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- */
-static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
-						struct page **usr_pgs)
-{
-	u32 pa;
-	int i;
-	struct page *pg;
-
-	for (i = 0; i < pages; i++) {
-		pa = user_va2_pa(mm, uva);
-
-		if (!pfn_valid(__phys_to_pfn(pa)))
-			break;
-
-		pg = phys_to_page(pa);
-		usr_pgs[i] = pg;
-		get_page(pg);
-	}
-	return i;
-}
-
-/**
- * user_to_dsp_map() - maps user to dsp virtual address
- * @mmu:	Pointer to iommu handle.
- * @uva:		Virtual user space address.
- * @da		DSP address
- * @size		Buffer size to map.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- * This function maps a user space buffer into DSP virtual address.
- *
- */
-u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
-				struct page **usr_pgs)
-{
-	int res, w;
-	unsigned pages;
-	int i;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	if (!size || !usr_pgs)
-		return -EINVAL;
-
-	pages = size / PG_SIZE4K;
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, uva);
-	while (vma && (uva + size > vma->vm_end))
-		vma = find_vma(mm, vma->vm_end + 1);
-
-	if (!vma) {
-		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
-						__func__, uva, size);
-		up_read(&mm->mmap_sem);
-		return -EINVAL;
-	}
-	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
-		w = 1;
-
-	if (vma->vm_flags & VM_IO)
-		i = get_io_pages(mm, uva, pages, usr_pgs);
-	else
-		i = get_user_pages(current, mm, uva, pages, w, 1,
-							usr_pgs, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (i < 0)
-		return i;
-
-	if (i < pages) {
-		res = -EFAULT;
-		goto err_pages;
-	}
-
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt) {
-		res = -ENOMEM;
-		goto err_pages;
-	}
-
-	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
-
-	if (res < 0)
-		goto err_sg;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
-
-	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-
-	if (!IS_ERR_VALUE(da))
-		return da;
-	res = (int)da;
-
-	sg_free_table(sgt);
-err_sg:
-	kfree(sgt);
-	i = pages;
-err_pages:
-	while (i--)
-		put_page(usr_pgs[i]);
-	return res;
-}
-
-/**
- * user_to_dsp_unmap() - unmaps DSP virtual buffer.
- * @mmu:	Pointer to iommu handle.
- * @da		DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-int user_to_dsp_unmap(struct iommu *mmu, u32 da)
-{
-	unsigned i;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	sgt = iommu_vunmap(mmu, da);
-	if (!sgt)
-		return -EFAULT;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		put_page(sg_page(sg));
-	sg_free_table(sgt);
-	kfree(sgt);
-
-	return 0;
-}
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194bada..5718645 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
 #include <dspbridge/ntfy.h>
 #include <dspbridge/sync.h>
 
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /* Bridge Driver */
 #include <dspbridge/dspdeh.h>
 #include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
-	struct shm_segs *sm_sg;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -310,9 +313,18 @@
 	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
 	struct cfg_hostres *host_res;
 	struct bridge_dev_context *pbridge_context;
+	u32 map_attrs;
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
+	u32 pa_curr = 0;
+	u32 va_curr = 0;
+	u32 gpp_va_curr = 0;
+	u32 num_bytes = 0;
+	u32 all_bits = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
 
 	status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
 	if (!pbridge_context) {
@@ -325,8 +337,6 @@
 		status = -EFAULT;
 		goto func_end;
 	}
-	sm_sg = &pbridge_context->sh_s;
-
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
 		status = -EFAULT;
@@ -461,14 +471,129 @@
 	if (status)
 		goto func_end;
 
-	sm_sg->seg1_pa = ul_gpp_pa;
-	sm_sg->seg1_da = ul_dyn_ext_base;
-	sm_sg->seg1_va = ul_gpp_va;
-	sm_sg->seg1_size = ul_seg1_size;
-	sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_da = ul_dsp_va;
-	sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_size = ul_seg_size;
+	pa_curr = ul_gpp_pa;
+	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+	gpp_va_curr = ul_gpp_va;
+	num_bytes = ul_seg1_size;
+
+	/*
+	 * Try to fit into TLB entries. If not possible, push them to page
+	 * tables. It is quite possible that if sections are not on
+	 * bigger page boundary, we may end up making several small pages.
+	 * So, push them onto page tables, if that is the case.
+	 */
+	map_attrs = DSP_MAPLITTLEENDIAN;
+	map_attrs |= DSP_MAPPHYSICALADDR;
+	map_attrs |= DSP_MAPELEMSIZE32;
+	map_attrs |= DSP_MAPDONOTLOCK;
+
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
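+		 * Example: pa_curr = 0x87000000 and va_curr = 0x11000000
+		 * give all_bits = 0x97000000, whose low 24 bits are clear,
+		 * so the 16MB entry of page_size[] is tried first.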
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+			"num_bytes %x\n", all_bits, pa_curr, va_curr,
+			num_bytes);
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				if (status)
+					goto func_end;
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				gpp_va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/*
+				 * Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size.
+				 */
+				break;
+			}
+		}
+	}
+	pa_curr += ul_pad_size;
+	va_curr += ul_pad_size;
+	gpp_va_curr += ul_pad_size;
+
+	/* Configure the TLB entries for the next cacheable segment */
+	num_bytes = ul_seg_size;
+	va_curr = ul_dsp_va * hio_mgr->word_size;
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+			va_curr, num_bytes);
+		for (i = 0; i < 4; i++) {
+			if (!(num_bytes >= page_size[i]) ||
+			    !((all_bits & (page_size[i] - 1)) == 0))
+				continue;
+			if (ndx < MAX_LOCK_TLB_ENTRIES) {
+				/*
+				 * This is the physical address written to
+				 * DSP MMU.
+				 */
+				ae_proc[ndx].ul_gpp_pa = pa_curr;
+				/*
+				 * This is the virtual uncached ioremapped
+				 * address!!!
+				 */
+				ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+				ae_proc[ndx].ul_dsp_va =
+				    va_curr / hio_mgr->word_size;
+				ae_proc[ndx].ul_size = page_size[i];
+				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+				dev_dbg(bridge, "shm MMU TLB entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					ae_proc[ndx].ul_gpp_pa,
+					ae_proc[ndx].ul_gpp_va,
+					ae_proc[ndx].ul_dsp_va *
+					hio_mgr->word_size, page_size[i]);
+				ndx++;
+			} else {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				dev_dbg(bridge,
+					"shm MMU PTE entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					pa_curr, gpp_va_curr,
+					va_curr, page_size[i]);
+				if (status)
+					goto func_end;
+			}
+			pa_curr += page_size[i];
+			va_curr += page_size[i];
+			gpp_va_curr += page_size[i];
+			num_bytes -= page_size[i];
+			/*
+			 * Don't try smaller sizes. Hopefully we have reached
+			 * an address aligned to a bigger page size.
+			 */
+			break;
+		}
+	}
 
 	/*
 	 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@
 					"DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
 					ae_proc[ndx].ul_dsp_va);
 				ndx++;
+			} else {
+				status = hio_mgr->intf_fxns->pfn_brd_mem_map
+				    (hio_mgr->hbridge_context,
+				     hio_mgr->ext_proc_info.ty_tlb[i].
+				     ul_gpp_phys,
+				     hio_mgr->ext_proc_info.ty_tlb[i].
+				     ul_dsp_virt, 0x100000, map_attrs,
+				     NULL);
 			}
 		}
 		if (status)
 			goto func_end;
 	}
 
+	map_attrs = DSP_MAPLITTLEENDIAN;
+	map_attrs |= DSP_MAPPHYSICALADDR;
+	map_attrs |= DSP_MAPELEMSIZE32;
+	map_attrs |= DSP_MAPDONOTLOCK;
+
+	/* Map the L4 peripherals */
+	i = 0;
+	while (l4_peripheral_table[i].phys_addr) {
+		status = hio_mgr->intf_fxns->pfn_brd_mem_map
+		    (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
+		     l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
+		     map_attrs, NULL);
+		if (status)
+			goto func_end;
+		i++;
+	}
+
 	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 		ae_proc[i].ul_dsp_va = 0;
 		ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@
 		status = -EFAULT;
 		goto func_end;
 	} else {
-		if (sm_sg->seg0_da > ul_shm_base) {
+		if (ae_proc[0].ul_dsp_va > ul_shm_base) {
 			status = -EPERM;
 			goto func_end;
 		}
 		/* ul_shm_base may not be at ul_dsp_va address */
-		ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+		ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
 		    hio_mgr->word_size;
 		/*
 		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@
 			goto func_end;
 		}
 		/* Register SM */
-		status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+		status =
+		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
 	}
 
 	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12..1be081f 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
 #include <dspbridge/host_os.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <plat/control.h>
 
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/sync.h>
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /*  ----------------------------------- Link Driver */
 #include <dspbridge/dspdefs.h>
 #include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
 /*  ----------------------------------- Platform Manager */
 #include <dspbridge/dev.h>
 #include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/wdt.h>
 
 /*  ----------------------------------- Local */
@@ -67,6 +71,20 @@
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
 #define PAGES_II_LVL_TABLE   512
+#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
+
+/*
+ * This is a totally ugly layer violation, but needed until
+ * omap_ctrl_set_dsp_boot*() are provided.
+ */
+#define OMAP3_IVA2_BOOTMOD_IDLE 1
+#define OMAP2_CONTROL_GENERAL 0x270
+#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
+#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+
+#define OMAP343X_CTRL_REGADDR(reg) \
+	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+
 
 /* Forward Declarations: */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@
 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 				    u8 *host_buff, u32 dsp_addr,
 				    u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes);
 static int bridge_dev_create(struct bridge_dev_context
 					**dev_cntxt,
 					struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs);
+
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
 
+/*  ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+	spinlock_t pg_lock;	/* Critical section object handle */
+
+	u32 l1_base_pa;		/* Physical address of the L1 PT */
+	u32 l1_base_va;		/* Virtual  address of the L1 PT */
+	u32 l1_size;		/* Size of the L1 PT */
+	u32 l1_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L1 table.
+	 * May not be aligned */
+
+	u32 l2_base_pa;		/* Physical address of the L2 PT */
+	u32 l2_base_va;		/* Virtual  address of the L2 PT */
+	u32 l2_size;		/* Size of the L2 PT */
+	u32 l2_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L2 table.
+	 * May not be aligned */
+
+	u32 l2_num_pages;	/* Number of allocated L2 PT */
+	/* Array [l2_num_pages] of L2 PT info structs */
+	struct page_info *pg_info;
+};
+
 /*
  *  This Bridge driver's function interface table.
  */
@@ -119,6 +192,8 @@
 	bridge_brd_set_state,
 	bridge_brd_mem_copy,
 	bridge_brd_mem_write,
+	bridge_brd_mem_map,
+	bridge_brd_mem_un_map,
 	/* The following CHNL functions are provided by chnl_io.lib: */
 	bridge_chnl_create,
 	bridge_chnl_destroy,
@@ -148,6 +223,27 @@
 	bridge_msg_set_queue_id,
 };
 
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
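+	/* the DSP MMU registers are not reachable while the DSP
+	 * subsystem is in hibernation, so wake it up first */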
+	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+	    dev_context->dw_brd_state == BRD_HIBERNATION)
+		wake_dsp(dev_context, NULL);
+
+	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+	pr_emerg("Bad page state in process '%s'\n"
+		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		 "Backtrace:\n",
+		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
+		 (unsigned long)pg->flags, pg->mapping,
+		 page_mapcount(pg), page_count(pg));
+	dump_stack();
+}
+
 /*
  *  ======== bridge_drv_entry ========
  *  purpose:
@@ -203,7 +299,8 @@
 		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 	}
-
+	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 
 	/* set the device state to IDLE */
@@ -274,17 +371,14 @@
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct iommu *mmu = NULL;
-	struct shm_segs *sm_sg;
-	int l4_i = 0, tlb_i = 0;
-	u32 sg0_da = 0, sg1_da = 0;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
 	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
 	/* Offset of shm_base_virt from tlb_base_virt */
 	u32 ul_shm_offset_virt;
+	s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* next free DSP MMU TLB entry index */
 	struct cfg_hostres *resources = NULL;
 	u32 temp;
 	u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@
 	ul_shm_base_virt *= DSPWORDSIZE;
 	DBC_ASSERT(ul_shm_base_virt != 0);
 	/* DSP Virtual address */
-	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
 	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 	ul_shm_offset_virt =
 	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 	/* Kernel logical address */
-	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
 
 	DBC_ASSERT(ul_shm_base != 0);
 	/* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@
 					OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
-
 	if (!status) {
+		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
+		 * IVA2 SYSC register */
+		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+		udelay(100);
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		mmu = dev_context->dsp_mmu;
-		if (mmu)
-			dsp_mmu_exit(mmu);
-		mmu = dsp_mmu_init();
-		if (IS_ERR(mmu)) {
-			dev_err(bridge, "dsp_mmu_init failed!\n");
-			dev_context->dsp_mmu = NULL;
-			status = (int)mmu;
-		}
-	}
-	if (!status) {
-		dev_context->dsp_mmu = mmu;
-		sm_sg = &dev_context->sh_s;
-		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
-			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg0_da)) {
-			status = (int)sg0_da;
-			sg0_da = 0;
-		}
-	}
-	if (!status) {
-		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
-			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg1_da)) {
-			status = (int)sg1_da;
-			sg1_da = 0;
-		}
-	}
-	if (!status) {
-		u32 da;
-		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
-			if (!tlb[tlb_i].ul_gpp_pa)
+		udelay(100);
+
+		/* Disable the DSP MMU */
+		hw_mmu_disable(resources->dw_dmmu_base);
+		/* Disable TWL */
+		hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+		/* Only make TLB entry if both addresses are non-zero */
+		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+		     entry_ndx++) {
+			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+			struct hw_mmu_map_attrs_t map_attrs = {
+				.endianism = e->endianism,
+				.element_size = e->elem_size,
+				.mixed_size = e->mixed_mode,
+			};
+
+			if (!e->ul_gpp_pa || !e->ul_dsp_va)
 				continue;
 
-			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
-				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
-				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
+			dev_dbg(bridge,
+					"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+					itmp_entry_ndx,
+					e->ul_gpp_pa,
+					e->ul_dsp_va,
+					e->ul_size);
 
-			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
-				tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-		}
-	}
-	if (!status) {
-		u32 da;
-		l4_i = 0;
-		while (l4_peripheral_table[l4_i].phys_addr) {
-			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
-				dsp_virt_addr, l4_peripheral_table[l4_i].
-				phys_addr, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-			l4_i++;
+			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+					e->ul_gpp_pa,
+					e->ul_dsp_va,
+					e->ul_size,
+					itmp_entry_ndx,
+					&map_attrs, 1, 1);
+
+			itmp_entry_ndx++;
 		}
 	}
 
 	/* Lock the above TLB entries and get the BIOS and load monitor timer
 	 * information */
 	if (!status) {
+		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_ttb_set(resources->dw_dmmu_base,
+			       dev_context->pt_attrs->l1_base_pa);
+		hw_mmu_twl_enable(resources->dw_dmmu_base);
+		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
+		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+		temp = (temp & 0xFFFFFFEF) | 0x11;
+		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
+
+		/* Let the DSP MMU run */
+		hw_mmu_enable(resources->dw_dmmu_base);
+
 		/* Enable the BIOS clock */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_LOADMON_GPTIMER,
 				     &ul_load_monitor_timer);
+	}
 
+	if (!status) {
 		if (ul_load_monitor_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_load_monitor_timer;
@@ -430,7 +519,9 @@
 			dev_dbg(bridge, "Not able to get the symbol for Load "
 				"Monitor Timer\n");
 		}
+	}
 
+	if (!status) {
 		if (ul_bios_gp_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_bios_gp_timer;
@@ -439,7 +530,9 @@
 			dev_dbg(bridge,
 				"Not able to get the symbol for BIOS Timer\n");
 		}
+	}
 
+	if (!status) {
 		/* Set the DSP clock rate */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@
 
 		/* Let DSP go */
 		dev_dbg(bridge, "%s Unreset\n", __func__);
+		/* Enable DSP MMU Interrupts */
+		hw_mmu_event_enable(resources->dw_dmmu_base,
+				    HW_MMU_ALL_INTERRUPTS);
 		/* release the RST1, DSP starts executing now .. */
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@
 
 			/* update board state */
 			dev_context->dw_brd_state = BRD_RUNNING;
-			return 0;
+			/* (void)chnlsm_enable_interrupt(dev_context); */
 		} else {
 			dev_context->dw_brd_state = BRD_UNKNOWN;
 		}
 	}
-
-	while (tlb_i--) {
-		if (!tlb[tlb_i].ul_gpp_pa)
-			continue;
-		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
-	}
-	while (l4_i--)
-		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
-	if (sg0_da)
-		iommu_kunmap(mmu, sg0_da);
-	if (sg1_da)
-		iommu_kunmap(mmu, sg1_da);
 	return status;
 }
 
@@ -553,9 +637,8 @@
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
-	int i;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	struct omap_dsp_platform_data *pdata =
 		omap_dspbridge_dev->dev.platform_data;
 
@@ -591,37 +674,23 @@
 
 	dsp_wdt_enable(false);
 
-	/* Reset DSP */
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
-		OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-
+	/* This is a good place to clear the MMU page tables as well */
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+		memset((u8 *) pt_attrs->pg_info, 0x00,
+		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+	}
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
 		omap_mbox_put(dev_context->mbox);
 		dev_context->mbox = NULL;
 	}
-	if (dev_context->dsp_mmu) {
-		pr_err("Proc stop mmu if statement\n");
-		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
-			if (!tlb[i].ul_gpp_pa)
-				continue;
-			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
-		}
-		i = 0;
-		while (l4_peripheral_table[i].phys_addr) {
-			iommu_kunmap(dev_context->dsp_mmu,
-				l4_peripheral_table[i].dsp_virt_addr);
-			i++;
-		}
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
-		dsp_mmu_exit(dev_context->dsp_mmu);
-		dev_context->dsp_mmu = NULL;
-	}
-	/* Reset IVA IOMMU*/
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
-		OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	/* Reset IVA2 clocks */
+	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	dsp_clock_disable_all(dev_context->dsp_per_clks);
 	dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
+	struct pg_table_attrs *pt_attrs;
+	u32 pg_tbl_pa;
+	u32 pg_tbl_va;
+	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
 
+	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+	if (pt_attrs != NULL) {
+		/* Assuming that we use only DSP's memory map
+		 * until 0x4000:0000, we would need only 1024
+		 * L1 entries, i.e. L1 size = 4K */
+		pt_attrs->l1_size = 0x1000;
+		align_size = pt_attrs->l1_size;
+		/* Align sizes are expected to be power of 2 */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+						     align_size, &pg_tbl_pa);
+
+		/* Check if the PA is aligned for us */
+		if ((pg_tbl_pa) & (align_size - 1)) {
+			/* PA not aligned to page table size,
+			 * try with more allocation and align */
+			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+					  pt_attrs->l1_size);
+			/* we like to get aligned on L1 table size */
+			pg_tbl_va =
+			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+						     align_size, &pg_tbl_pa);
+			/* We should be able to get aligned table now */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+			/* Align the PA to the next 'align' boundary */
+			pt_attrs->l1_base_pa =
+			    ((pg_tbl_pa) +
+			     (align_size - 1)) & (~(align_size - 1));
+			pt_attrs->l1_base_va =
+			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
+		} else {
+			/* We got aligned PA, cool */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+			pt_attrs->l1_base_pa = pg_tbl_pa;
+			pt_attrs->l1_base_va = pg_tbl_va;
+		}
+		if (pt_attrs->l1_base_va)
+			memset((u8 *) pt_attrs->l1_base_va, 0x00,
+			       pt_attrs->l1_size);
+
+		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
+		 * L4 pages */
+		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+		    pt_attrs->l2_num_pages;
+		align_size = 4;	/* Make it u32 aligned */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+						     align_size, &pg_tbl_pa);
+		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+		pt_attrs->l2_base_pa = pg_tbl_pa;
+		pt_attrs->l2_base_va = pg_tbl_va;
+
+		if (pt_attrs->l2_base_va)
+			memset((u8 *) pt_attrs->l2_base_va, 0x00,
+			       pt_attrs->l2_size);
+
+		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+					sizeof(struct page_info), GFP_KERNEL);
+		dev_dbg(bridge,
+			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
+			"%x, size %x\n", pt_attrs->l1_base_pa,
+			pt_attrs->l1_base_va, pt_attrs->l1_size,
+			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+			pt_attrs->l2_size);
+		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+	}
+	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+		dev_context->pt_attrs = pt_attrs;
+	else
+		status = -ENOMEM;
+
 	if (!status) {
+		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+		/* Set the Clock Divisor for the DSP module */
+		udelay(5);
+		/* MMU address is obtained from the host
+		 * resources struct */
+		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+	}
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@
 		/* Return ptr to our device state to the DSP API for storage */
 		*dev_cntxt = dev_context;
 	} else {
+		if (pt_attrs != NULL) {
+			kfree(pt_attrs->pg_info);
+
+			if (pt_attrs->l2_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l2_tbl_alloc_va,
+						  pt_attrs->l2_tbl_alloc_pa,
+						  pt_attrs->l2_tbl_alloc_sz);
+			}
+			if (pt_attrs->l1_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l1_tbl_alloc_va,
+						  pt_attrs->l1_tbl_alloc_pa,
+						  pt_attrs->l1_tbl_alloc_sz);
+			}
+		}
+		kfree(pt_attrs);
 		kfree(dev_context);
 	}
 func_end:
@@ -789,6 +968,7 @@
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
+	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -802,6 +982,23 @@
 
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		kfree(pt_attrs->pg_info);
+
+		if (pt_attrs->l2_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+					  pt_attrs->l2_tbl_alloc_pa,
+					  pt_attrs->l2_tbl_alloc_sz);
+		}
+		if (pt_attrs->l1_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+					  pt_attrs->l1_tbl_alloc_pa,
+					  pt_attrs->l1_tbl_alloc_sz);
+		}
+		kfree(pt_attrs);
+	}
 
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -832,6 +1029,8 @@
 			iounmap((void *)host_res->dw_mem_base[3]);
 		if (host_res->dw_mem_base[4])
 			iounmap((void *)host_res->dw_mem_base[4]);
+		if (host_res->dw_dmmu_base)
+			iounmap(host_res->dw_dmmu_base);
 		if (host_res->dw_per_base)
 			iounmap(host_res->dw_per_base);
 		if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@
 		host_res->dw_mem_base[2] = (u32) NULL;
 		host_res->dw_mem_base[3] = (u32) NULL;
 		host_res->dw_mem_base[4] = (u32) NULL;
+		host_res->dw_dmmu_base = NULL;
 		host_res->dw_sys_ctrl_base = NULL;
 
 		kfree(host_res);
@@ -928,6 +1128,673 @@
 }
 
 /*
+ *  ======== bridge_brd_mem_map ========
+ *      This function maps MPU buffer to the DSP address space. It performs
+ *  linear to physical address translation if required. It translates each
+ *  page since linear addresses can be physically non-contiguous.
+ *  All address & size arguments are assumed to be page aligned (in proc.c)
+ *
+ *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
+ */
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages)
+{
+	u32 attrs;
+	int status = 0;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct hw_mmu_map_attrs_t hw_attrs;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	u32 write = 0;
+	u32 num_usr_pgs = 0;
+	struct page *mapped_page, *pg;
+	s32 pg_num;
+	u32 va = virt_addr;
+	struct task_struct *curr_task = current;
+	u32 pg_i = 0;
+	u32 mpu_addr, pa;
+
+	dev_dbg(bridge,
+		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
+		ul_map_attr);
+	if (ul_num_bytes == 0)
+		return -EINVAL;
+
+	if (ul_map_attr & DSP_MAP_DIR_MASK) {
+		attrs = ul_map_attr;
+	} else {
+		/* Assign default attributes */
+		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+	}
+	/* Take mapping properties */
+	if (attrs & DSP_MAPBIGENDIAN)
+		hw_attrs.endianism = HW_BIG_ENDIAN;
+	else
+		hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
+	/* Ignore element_size if mixed_size is enabled */
+	if (hw_attrs.mixed_size == 0) {
+		if (attrs & DSP_MAPELEMSIZE8) {
+			/* Size is 8 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+		} else if (attrs & DSP_MAPELEMSIZE16) {
+			/* Size is 16 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+		} else if (attrs & DSP_MAPELEMSIZE32) {
+			/* Size is 32 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+		} else if (attrs & DSP_MAPELEMSIZE64) {
+			/* Size is 64 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+		} else {
+			/*
+			 * Mixedsize isn't enabled, so size can't be
+			 * zero here
+			 */
+			return -EINVAL;
+		}
+	}
+	if (attrs & DSP_MAPDONOTLOCK)
+		hw_attrs.donotlockmpupage = 1;
+	else
+		hw_attrs.donotlockmpupage = 0;
+
+	if (attrs & DSP_MAPVMALLOCADDR) {
+		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
+				       ul_num_bytes, &hw_attrs);
+	}
+	/*
+	 * Do OS-specific user-va to pa translation.
+	 * Combine physically contiguous regions to reduce TLBs.
+	 * Pass the translated pa to pte_update.
+	 */
+	if ((attrs & DSP_MAPPHYSICALADDR)) {
+		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
+				    ul_num_bytes, &hw_attrs);
+		goto func_cont;
+	}
+
+	/*
+	 * Important Note: ul_mpu_addr is mapped from user application process
+	 * to current process - it must lie completely within the current
+	 * virtual memory address space in order to be of use to us here!
+	 */
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ul_mpu_addr);
+	if (vma)
+		dev_dbg(bridge,
+			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+			ul_num_bytes, vma->vm_start, vma->vm_end,
+			vma->vm_flags);
+
+	/*
+	 * It is observed that under some circumstances, the user buffer is
+	 * spread across several VMAs. So loop through and check if the entire
+	 * user buffer is covered
+	 */
+	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+		/* jump to the next VMA region */
+		vma = find_vma(mm, vma->vm_end + 1);
+		if (!vma)
+			break;
+		dev_dbg(bridge,
+			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+			ul_num_bytes, vma->vm_start, vma->vm_end,
+			vma->vm_flags);
+	}
+	if (!vma) {
+		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+		       __func__, ul_mpu_addr, ul_num_bytes);
+		status = -EINVAL;
+		up_read(&mm->mmap_sem);
+		goto func_cont;
+	}
+
+	if (vma->vm_flags & VM_IO) {
+		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+		mpu_addr = ul_mpu_addr;
+
+		/* Get the physical addresses for user buffer */
+		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+			pa = user_va2_pa(mm, mpu_addr);
+			if (!pa) {
+				status = -EPERM;
+				pr_err("DSPBRIDGE: VM_IO mapping physical"
+				       "address is invalid\n");
+				break;
+			}
+			if (pfn_valid(__phys_to_pfn(pa))) {
+				pg = PHYS_TO_PAGE(pa);
+				get_page(pg);
+				if (page_count(pg) < 1) {
+					pr_err("Bad page in VM_IO buffer\n");
+					bad_page_dump(pa, pg);
+				}
+			}
+			status = pte_set(dev_context->pt_attrs, pa,
+					 va, HW_PAGE_SIZE4KB, &hw_attrs);
+			if (status)
+				break;
+
+			va += HW_PAGE_SIZE4KB;
+			mpu_addr += HW_PAGE_SIZE4KB;
+			pa += HW_PAGE_SIZE4KB;
+		}
+	} else {
+		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+			write = 1;
+
+		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+						write, 1, &mapped_page, NULL);
+			if (pg_num > 0) {
+				if (page_count(mapped_page) < 1) {
+					pr_err("Bad page count after doing"
+					       "get_user_pages on"
+					       "user buffer\n");
+					bad_page_dump(page_to_phys(mapped_page),
+						      mapped_page);
+				}
+				status = pte_set(dev_context->pt_attrs,
+						 page_to_phys(mapped_page), va,
+						 HW_PAGE_SIZE4KB, &hw_attrs);
+				if (status)
+					break;
+
+				if (mapped_pages)
+					mapped_pages[pg_i] = mapped_page;
+
+				va += HW_PAGE_SIZE4KB;
+				ul_mpu_addr += HW_PAGE_SIZE4KB;
+			} else {
+				pr_err("DSPBRIDGE: get_user_pages FAILED,"
+				       "MPU addr = 0x%x,"
+				       "vma->vm_flags = 0x%lx,"
+				       "get_user_pages Err"
+				       "Value = %d, Buffer"
+				       "size=0x%x\n", ul_mpu_addr,
+				       vma->vm_flags, pg_num, ul_num_bytes);
+				status = -EPERM;
+				break;
+			}
+		}
+	}
+	up_read(&mm->mmap_sem);
+func_cont:
+	if (status) {
+		/*
+		 * Roll back the mapped pages in case mapping failed
+		 * partway through
+		 */
+		if (pg_i) {
+			bridge_brd_mem_un_map(dev_context, virt_addr,
+					   (pg_i * PG_SIZE4K));
+		}
+		status = -EPERM;
+	}
+	/*
+	 * In any case, flush the TLB
+	 * This is called from here instead from pte_update to avoid unnecessary
+	 * repetition while mapping non-contiguous physical regions of a virtual
+	 * region
+	 */
+	flush_all(dev_context);
+	dev_dbg(bridge, "%s status %x\n", __func__, status);
+	return status;
+}
+
+/*
+ *  ======== bridge_brd_mem_un_map ========
+ *      Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ *      PTEs of a mapped memory block are contiguous in any page table
+ *      So, instead of looking up the PTE address for every 4K block,
+ *      we clear consecutive PTEs until we unmap all the bytes
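+ *
+ *      e.g. a 64KB buffer mapped as 4KB pages is cleared as 16
+ *      consecutive L2 PTEs without re-walking the L1 table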
+ */
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes)
+{
+	u32 l1_base_va;
+	u32 l2_base_va;
+	u32 l2_base_pa;
+	u32 l2_page_num;
+	u32 pte_val;
+	u32 pte_size;
+	u32 pte_count;
+	u32 pte_addr_l1;
+	u32 pte_addr_l2 = 0;
+	u32 rem_bytes;
+	u32 rem_bytes_l2;
+	u32 va_curr;
+	struct page *pg = NULL;
+	int status = 0;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt = dev_context->pt_attrs;
+	u32 temp;
+	u32 paddr;
+	u32 numof4k_pages = 0;
+
+	va_curr = virt_addr;
+	rem_bytes = ul_num_bytes;
+	rem_bytes_l2 = 0;
+	l1_base_va = pt->l1_base_va;
+	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
+		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
+		ul_num_bytes, l1_base_va, pte_addr_l1);
+
+	while (rem_bytes && !status) {
+		u32 va_curr_orig = va_curr;
+		/* Find whether the L1 PTE points to a valid L2 PT */
+		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+		pte_val = *(u32 *) pte_addr_l1;
+		pte_size = hw_mmu_pte_size_l1(pte_val);
+
+		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+			goto skip_coarse_page;
+
+		/*
+		 * Get the L2 PA from the L1 PTE, and find
+		 * corresponding L2 VA
+		 */
+		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+		l2_page_num =
+		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+		/*
+		 * Find the L2 PTE address from which we will start
+		 * clearing, the number of PTEs to be cleared on this
+		 * page, and the size of VA space that needs to be
+		 * cleared on this L2 page
+		 */
+		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+		if (rem_bytes < (pte_count * PG_SIZE4K))
+			pte_count = rem_bytes / PG_SIZE4K;
+		rem_bytes_l2 = pte_count * PG_SIZE4K;
+
+		/*
+		 * Unmap the VA space on this L2 PT. A quicker way
+		 * would be to clear pte_count entries starting from
+		 * pte_addr_l2. However, below code checks that we don't
+		 * clear invalid entries or less than 64KB for a 64KB
+		 * entry. Similar checking is done for L1 PTEs too
+		 * below
+		 */
+		while (rem_bytes_l2 && !status) {
+			pte_val = *(u32 *) pte_addr_l2;
+			pte_size = hw_mmu_pte_size_l2(pte_val);
+			/* va_curr aligned to pte_size? */
+			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+			    va_curr & (pte_size - 1)) {
+				status = -EPERM;
+				break;
+			}
+
+			/* Collect Physical addresses from VA */
+			paddr = (pte_val & ~(pte_size - 1));
+			if (pte_size == HW_PAGE_SIZE64KB)
+				numof4k_pages = 16;
+			else
+				numof4k_pages = 1;
+			temp = 0;
+			while (temp++ < numof4k_pages) {
+				if (!pfn_valid(__phys_to_pfn(paddr))) {
+					paddr += HW_PAGE_SIZE4KB;
+					continue;
+				}
+				pg = PHYS_TO_PAGE(paddr);
+				if (page_count(pg) < 1) {
+					pr_info("DSPBRIDGE: UNMAP function: "
+						"COUNT 0 FOR PA 0x%x, size = "
+						"0x%x\n", paddr, ul_num_bytes);
+					bad_page_dump(paddr, pg);
+				} else {
+					set_page_dirty(pg);
+					page_cache_release(pg);
+				}
+				paddr += HW_PAGE_SIZE4KB;
+			}
+			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
+				status = -EPERM;
+				goto EXIT_LOOP;
+			}
+
+			status = 0;
+			rem_bytes_l2 -= pte_size;
+			va_curr += pte_size;
+			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+		}
+		spin_lock(&pt->pg_lock);
+		if (rem_bytes_l2 == 0) {
+			pt->pg_info[l2_page_num].num_entries -= pte_count;
+			if (pt->pg_info[l2_page_num].num_entries == 0) {
+				/*
+				 * Clear the L1 PTE pointing to the L2 PT
+				 */
+				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+						     HW_MMU_COARSE_PAGE_SIZE))
+					status = 0;
+				else {
+					status = -EPERM;
+					spin_unlock(&pt->pg_lock);
+					goto EXIT_LOOP;
+				}
+			}
+			rem_bytes -= pte_count * PG_SIZE4K;
+		} else
+			status = -EPERM;
+
+		spin_unlock(&pt->pg_lock);
+		continue;
+skip_coarse_page:
+		/* va_curr aligned to pte_size? */
+		/* pte_size = 1 MB or 16 MB */
+		if (pte_size == 0 || rem_bytes < pte_size ||
+		    va_curr & (pte_size - 1)) {
+			status = -EPERM;
+			break;
+		}
+
+		if (pte_size == HW_PAGE_SIZE1MB)
+			numof4k_pages = 256;
+		else
+			numof4k_pages = 4096;
+		temp = 0;
+		/* Collect Physical addresses from VA */
+		paddr = (pte_val & ~(pte_size - 1));
+		while (temp++ < numof4k_pages) {
+			if (pfn_valid(__phys_to_pfn(paddr))) {
+				pg = PHYS_TO_PAGE(paddr);
+				if (page_count(pg) < 1) {
+					pr_info("DSPBRIDGE: UNMAP function: "
+						"COUNT 0 FOR PA 0x%x, size = "
+						"0x%x\n", paddr, ul_num_bytes);
+					bad_page_dump(paddr, pg);
+				} else {
+					set_page_dirty(pg);
+					page_cache_release(pg);
+				}
+			}
+			paddr += HW_PAGE_SIZE4KB;
+		}
+		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
+			status = 0;
+			rem_bytes -= pte_size;
+			va_curr += pte_size;
+		} else {
+			status = -EPERM;
+			goto EXIT_LOOP;
+		}
+	}
+	/*
+	 * It is better to flush the TLB here, so that any stale old entries
+	 * get flushed
+	 */
+EXIT_LOOP:
+	flush_all(dev_context);
+	dev_dbg(bridge,
+		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+	return status;
+}
+
+/*
+ *  ======== user_va2_pa ========
+ *  Purpose:
+ *      This function walks through the page tables to convert a userland
+ *      virtual address to physical address
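+ *      Returns 0 when no valid mapping exists for the address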
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	pgd = pgd_offset(mm, address);
+	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+		pmd = pmd_offset(pgd, address);
+		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+			ptep = pte_offset_map(pmd, address);
+			if (ptep) {
+				pte = *ptep;
+				if (pte_present(pte))
+					return pte & PAGE_MASK;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *  ======== pte_update ========
+ *      This function calculates the optimum page-aligned addresses and sizes
+ *      Caller must pass page-aligned values
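+ *      e.g. a 1MB-aligned request of 1MB + 4KB is laid down as one
+ *      1MB PTE followed by one 4KB PTE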
+ */
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs)
+{
+	u32 i;
+	u32 all_bits;
+	u32 pa_curr = pa;
+	u32 va_curr = va;
+	u32 num_bytes = size;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	int status = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
+
+	while (num_bytes && !status) {
+		/* To find the max. page size with which both PA & VA are
+		 * aligned */
+		all_bits = pa_curr | va_curr;
+
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    pte_set(dev_context->pt_attrs, pa_curr,
+					    va_curr, page_size[i], map_attrs);
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/* Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size */
+				break;
+			}
+		}
+	}
+
+	return status;
+}
+
+/*
+ *  ======== pte_set ========
+ *      This function calculates PTE address (MPU virtual) to be updated
+ *      It also manages the L2 page tables
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+	u32 i;
+	u32 pte_val;
+	u32 pte_addr_l1;
+	u32 pte_size;
+	/* Base address of the PT that will be updated */
+	u32 pg_tbl_va;
+	u32 l1_base_va;
+	/* Compiler warns that the next three variables might be used
+	 * uninitialized in this function. They are not, but initialize
+	 * them anyway to silence the warning. */
+	u32 l2_base_va = 0;
+	u32 l2_base_pa = 0;
+	u32 l2_page_num = 0;
+	int status = 0;
+
+	l1_base_va = pt->l1_base_va;
+	pg_tbl_va = l1_base_va;
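+	/* 4KB and 64KB pages need an L2 table; 1MB and 16MB sections
+	 * are written directly into the L1 table below */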
+	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+		/* Find whether the L1 PTE points to a valid L2 PT */
+		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+			pte_val = *(u32 *) pte_addr_l1;
+			pte_size = hw_mmu_pte_size_l1(pte_val);
+		} else {
+			return -EPERM;
+		}
+		spin_lock(&pt->pg_lock);
+		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+			/* Get the L2 PA from the L1 PTE, and find
+			 * corresponding L2 VA */
+			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+			l2_base_va =
+			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+			l2_page_num =
+			    (l2_base_pa -
+			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+		} else if (pte_size == 0) {
+			/* L1 PTE is invalid. Allocate a L2 PT and
+			 * point the L1 PTE to it */
+			/* Find a free L2 PT. */
+			for (i = 0; (i < pt->l2_num_pages) &&
+			     (pt->pg_info[i].num_entries != 0); i++)
+				;
+			if (i < pt->l2_num_pages) {
+				l2_page_num = i;
+				l2_base_pa = pt->l2_base_pa + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				l2_base_va = pt->l2_base_va + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				/* Endianness attributes are ignored for
+				 * HW_MMU_COARSE_PAGE_SIZE */
+				status =
+				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+						   HW_MMU_COARSE_PAGE_SIZE,
+						   attrs);
+			} else {
+				status = -ENOMEM;
+			}
+		} else {
+			/* Found valid L1 PTE of another size.
+			 * Should not overwrite it. */
+			status = -EPERM;
+		}
+		if (!status) {
+			pg_tbl_va = l2_base_va;
+			if (size == HW_PAGE_SIZE64KB)
+				pt->pg_info[l2_page_num].num_entries += 16;
+			else
+				pt->pg_info[l2_page_num].num_entries++;
+			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+				"%x, num_entries %x\n", l2_base_va,
+				l2_base_pa, l2_page_num,
+				pt->pg_info[l2_page_num].num_entries);
+		}
+		spin_unlock(&pt->pg_lock);
+	}
+	if (!status) {
+		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+			pg_tbl_va, pa, va, size);
+		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+			"mixed_size %x\n", attrs->endianism,
+			attrs->element_size, attrs->mixed_size);
+		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+	}
+
+	return status;
+}
+
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs)
+{
+	int status = 0;
+	struct page *page[1];
+	u32 i;
+	u32 pa_curr;
+	u32 pa_next;
+	u32 va_curr;
+	u32 size_curr;
+	u32 num_pages;
+	u32 pa;
+	u32 num_of4k_pages;
+	u32 temp = 0;
+
+	/*
+	 * Do Kernel va to pa translation.
+	 * Combine physically contiguous regions to reduce TLBs.
+	 * Pass the translated pa to pte_update.
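+	 * e.g. two successive vmalloc pages translating to 0x80001000
+	 * and 0x80002000 are coalesced into a single 8KB region.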
+	 */
+	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
+	i = 0;
+	va_curr = ul_mpu_addr;
+	page[0] = vmalloc_to_page((void *)va_curr);
+	pa_next = page_to_phys(page[0]);
+	while (!status && (i < num_pages)) {
+		/*
+		 * Reuse pa_next from the previous iteration to avoid
+		 * an extra va2pa call
+		 */
+		pa_curr = pa_next;
+		size_curr = PAGE_SIZE;
+		/*
+		 * If the next page is physically contiguous,
+		 * map it with the current one by increasing
+		 * the size of the region to be mapped
+		 */
+		while (++i < num_pages) {
+			page[0] =
+			    vmalloc_to_page((void *)(va_curr + size_curr));
+			pa_next = page_to_phys(page[0]);
+
+			if (pa_next == (pa_curr + size_curr))
+				size_curr += PAGE_SIZE;
+			else
+				break;
+
+		}
+		if (pa_next == 0) {
+			status = -ENOMEM;
+			break;
+		}
+		pa = pa_curr;
+		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+		temp = 0;
+		while (temp++ < num_of4k_pages) {
+			get_page(PHYS_TO_PAGE(pa));
+			pa += HW_PAGE_SIZE4KB;
+		}
+		status = pte_update(dev_context, pa_curr, virt_addr +
+				    (va_curr - ul_mpu_addr), size_curr,
+				    hw_attrs);
+		va_curr += size_curr;
+	}
+	/*
+	 * In any case, flush the TLB
+	 * This is called from here instead from pte_update to avoid unnecessary
+	 * repetition while mapping non-contiguous physical regions of a virtual
+	 * region
+	 */
+	flush_all(dev_context);
+	dev_dbg(bridge, "%s status %x\n", __func__, status);
+	return status;
+}
+
+/*
  *  ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
  */
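
The contiguity merge in mem_map_vmalloc() above is worth calling out: it walks the vmalloc area page by page and coalesces physically adjacent pages, so pte_update() runs once per physical run instead of once per page. A minimal sketch of the same walk, assuming a hypothetical map_region() callback in place of pte_update():

	/* Sketch only: coalesce physically contiguous vmalloc pages.
	 * map_region() is a stand-in for pte_update(), not driver code. */
	static int map_vmalloc_region(void *va, u32 bytes)
	{
		u32 off = 0;

		while (off < bytes) {
			u32 pa = page_to_phys(vmalloc_to_page(va + off));
			u32 len = PAGE_SIZE;

			/* grow the run while the next page follows physically */
			while (off + len < bytes &&
			       page_to_phys(vmalloc_to_page(va + off + len)) ==
			       pa + len)
				len += PAGE_SIZE;

			if (map_region(pa, off, len))	/* hypothetical */
				return -ENOMEM;
			off += len;
		}
		return 0;
	}
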
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd..fb9026e 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
 #include <dspbridge/dev.h>
 #include <dspbridge/iodefs.h>
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 #include <dspbridge/pwr_sh.h>
 
 /*  ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02..ba29610 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@
 
 		if (!status) {
 			ul_tlb_base_virt =
-			    dev_context->sh_s.seg0_da * DSPWORDSIZE;
+			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
-			dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va;
+			dw_ext_prog_virt_mem =
+			    dev_context->atlb_entry[0].ul_gpp_va;
 
 			if (!trace_read) {
 				ul_shm_offset_virt =
 				    ul_shm_base_virt - ul_tlb_base_virt;
 				ul_shm_offset_virt +=
 				    PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
-						  1, PAGE_SIZE * 16);
+						  1, HW_PAGE_SIZE64KB);
 				dw_ext_prog_virt_mem -= ul_shm_offset_virt;
 				dw_ext_prog_virt_mem +=
 				    (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@
 			ret = -EPERM;
 
 		if (!ret) {
-			ul_tlb_base_virt = dev_context->sh_s.seg0_da *
-								DSPWORDSIZE;
-
+			ul_tlb_base_virt =
+			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 
 			if (symbols_reloaded) {
@@ -337,7 +337,7 @@
 			    ul_shm_base_virt - ul_tlb_base_virt;
 			if (trace_load) {
 				dw_ext_prog_virt_mem =
-					dev_context->sh_s.seg0_va;
+				    dev_context->atlb_entry[0].ul_gpp_va;
 			} else {
 				dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
 				dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@
 		omap_dspbridge_dev->dev.platform_data;
 	struct cfg_hostres *resources = dev_context->resources;
 	int status = 0;
+	u32 temp;
 
 	if (!dev_context->mbox)
 		return 0;
@@ -436,7 +437,7 @@
 		omap_mbox_restore_ctx(dev_context->mbox);
 
 		/* Access MMU SYS CONFIG register to generate a short wakeup */
-		iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG);
+		temp = readl(resources->dw_dmmu_base + 0x10);
 
 		dev_context->dw_brd_state = BRD_RUNNING;
 	} else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c..3430418 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/wdt.h>
 
+static u32 fault_addr;
+
+static void mmu_fault_dpc(unsigned long data)
+{
+	struct deh_mgr *deh = (void *)data;
+
+	if (!deh)
+		return;
+
+	bridge_deh_notify(deh, DSP_MMUFAULT, 0);
+}
+
+static irqreturn_t mmu_fault_isr(int irq, void *data)
+{
+	struct deh_mgr *deh = data;
+	struct cfg_hostres *resources;
+	u32 event;
+
+	if (!deh)
+		return IRQ_HANDLED;
+
+	resources = deh->hbridge_context->resources;
+	if (!resources) {
+		dev_dbg(bridge, "%s: Failed to get Host Resources\n",
+				__func__);
+		return IRQ_HANDLED;
+	}
+
+	hw_mmu_event_status(resources->dw_dmmu_base, &event);
+	if (event == HW_MMU_TRANSLATION_FAULT) {
+		hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
+		dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
+				event, fault_addr);
+		/*
+		 * Schedule a DPC directly. In the future, it may be
+		 * necessary to check if DSP MMU fault is intended for
+		 * Bridge.
+		 */
+		tasklet_schedule(&deh->dpc_tasklet);
+
+		/* Disable the MMU events, else once we clear the fault
+		 * it will start to raise interrupts again */
+		hw_mmu_event_disable(resources->dw_dmmu_base,
+				HW_MMU_TRANSLATION_FAULT);
+	} else {
+		hw_mmu_event_disable(resources->dw_dmmu_base,
+				HW_MMU_ALL_INTERRUPTS);
+	}
+	return IRQ_HANDLED;
+}
+
 int bridge_deh_create(struct deh_mgr **ret_deh,
 		struct dev_object *hdev_obj)
 {
@@ -58,9 +109,18 @@
 	}
 	ntfy_init(deh->ntfy_obj);
 
+	/* Create a MMUfault DPC */
+	tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
+
 	/* Fill in context structure */
 	deh->hbridge_context = hbridge_context;
 
+	/* Install ISR function for DSP MMU fault */
+	status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
+			"DspBridge\tiommu fault", deh);
+	if (status < 0)
+		goto err;
+
 	*ret_deh = deh;
 	return 0;
 
@@ -80,6 +140,11 @@
 		ntfy_delete(deh->ntfy_obj);
 		kfree(deh->ntfy_obj);
 	}
+	/* Disable DSP MMU fault */
+	free_irq(INT_DSP_MMU_IRQ, deh);
+
+	/* Free DPC object */
+	tasklet_kill(&deh->dpc_tasklet);
 
 	/* Deallocate the DEH manager object */
 	kfree(deh);
@@ -101,6 +166,48 @@
 		return ntfy_unregister(deh->ntfy_obj, hnotification);
 }
 
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
+{
+	struct cfg_hostres *resources;
+	struct hw_mmu_map_attrs_t map_attrs = {
+		.endianism = HW_LITTLE_ENDIAN,
+		.element_size = HW_ELEM_SIZE16BIT,
+		.mixed_size = HW_MMU_CPUES,
+	};
+	void *dummy_va_addr;
+
+	resources = dev_context->resources;
+	dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
+	if (!dummy_va_addr)
+		return;
+
+	/*
+	 * Before acking the MMU fault, let's make sure MMU can only
+	 * access entry #0. Then add a new entry so that the DSP OS
+	 * can continue in order to dump the stack.
+	 */
+	hw_mmu_twl_disable(resources->dw_dmmu_base);
+	hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
+
+	hw_mmu_tlb_add(resources->dw_dmmu_base,
+			virt_to_phys(dummy_va_addr), fault_addr,
+			HW_PAGE_SIZE4KB, 1,
+			&map_attrs, HW_SET, HW_SET);
+
+	dsp_clk_enable(DSP_CLK_GPT8);
+
+	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+	/* Clear MMU interrupt */
+	hw_mmu_event_ack(resources->dw_dmmu_base,
+			HW_MMU_TRANSLATION_FAULT);
+	dump_dsp_stack(dev_context);
+	dsp_clk_disable(DSP_CLK_GPT8);
+
+	hw_mmu_disable(resources->dw_dmmu_base);
+	free_page((unsigned long)dummy_va_addr);
+}
+#endif
+
 static inline const char *event_to_string(int event)
 {
 	switch (event) {
@@ -133,7 +240,13 @@
 #endif
 		break;
 	case DSP_MMUFAULT:
-		dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info);
+		dev_err(bridge, "%s: %s, addr=0x%x", __func__,
+				str, fault_addr);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+		print_dsp_trace_buffer(dev_context);
+		dump_dl_modules(dev_context);
+		mmu_fault_print_stack(dev_context);
+#endif
 		break;
 	default:
 		dev_err(bridge, "%s: %s", __func__, str);
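
The fault handling added above is the classic top-half/bottom-half split: mmu_fault_isr() only latches the fault address and masks the event, while the tasklet (the "DPC") does the notification work outside hard-IRQ context. A stripped-down sketch of the pattern, with my_isr/my_dpc/MY_IRQ as placeholder names:

	static void my_dpc(unsigned long data)
	{
		/* deferred work; still atomic context, but IRQs are open */
	}

	static DECLARE_TASKLET(my_tasklet, my_dpc, 0);

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		/* latch minimal state, mask the source, then defer */
		tasklet_schedule(&my_tasklet);
		return IRQ_HANDLED;
	}

	/* setup mirrors bridge_deh_create():
	 *	request_irq(MY_IRQ, my_isr, 0, "my-dev", dev);
	 * teardown mirrors bridge_deh_destroy():
	 *	free_irq(MY_IRQ, dev);
	 *	tasklet_kill(&my_tasklet);
	 */
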
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 0000000..e48d7f6
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
+/*
+ * EasiGlobal.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _EASIGLOBAL_H
+#define _EASIGLOBAL_H
+#include <linux/types.h>
+
+/*
+ * DEFINE:        READ_ONLY, WRITE_ONLY & READ_WRITE
+ *
+ * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
+ */
+
+#define READ_ONLY    1
+#define WRITE_ONLY   2
+#define READ_WRITE   3
+
+/*
+ * MACRO:        _DEBUG_LEVEL1_EASI
+ *
+ * DESCRIPTION:  A macro which can be used to indicate that a particular
+ *               register access function was called.
+ *
+ * NOTE:         We currently don't use this functionality.
+ */
+#define _DEBUG_LEVEL1_EASI(easi_num)     ((void)0)
+
+#endif /* _EASIGLOBAL_H */
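
_DEBUG_LEVEL1_EASI() expands to nothing, but the call sites are kept so tracing can be re-enabled by redefining the macro. A hypothetical debug variant, not part of this patch (and assuming MMU_BASE_EASIL1 is defined somewhere so the EASIL1_* arguments resolve):

	#ifdef DEBUG_EASI
	#define _DEBUG_LEVEL1_EASI(easi_num) \
		pr_debug("EASI access point %d\n", (int)(easi_num))
	#else
	#define _DEBUG_LEVEL1_EASI(easi_num)     ((void)0)
	#endif
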
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 0000000..1cefca3
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
+/*
+ * MMUAccInt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_ACC_INT_H
+#define _MMU_ACC_INT_H
+
+/* Mappings of level 1 EASI function numbers to function names */
+
+#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
+#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32  (MMU_BASE_EASIL1 + 17)
+#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32    (MMU_BASE_EASIL1 + 39)
+#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 51)
+#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
+#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
+#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32   (MMU_BASE_EASIL1 + 180)
+#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32     (MMU_BASE_EASIL1 + 190)
+#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32   (MMU_BASE_EASIL1 + 194)
+#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 198)
+#define EASIL1_MMUMMU_LOCK_READ_REGISTER32   (MMU_BASE_EASIL1 + 203)
+#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 204)
+#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32  (MMU_BASE_EASIL1 + 205)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32  (MMU_BASE_EASIL1 + 212)
+#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32    (MMU_BASE_EASIL1 + 213)
+#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 214)
+#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 226)
+#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
+#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 322)
+
+/* Register offset address definitions */
+#define MMU_MMU_SYSCONFIG_OFFSET   0x10
+#define MMU_MMU_IRQSTATUS_OFFSET  0x18
+#define MMU_MMU_IRQENABLE_OFFSET    0x1c
+#define MMU_MMU_WALKING_ST_OFFSET 0x40
+#define MMU_MMU_CNTL_OFFSET   0x44
+#define MMU_MMU_FAULT_AD_OFFSET  0x48
+#define MMU_MMU_TTB_OFFSET  0x4c
+#define MMU_MMU_LOCK_OFFSET   0x50
+#define MMU_MMU_LD_TLB_OFFSET  0x54
+#define MMU_MMU_CAM_OFFSET   0x58
+#define MMU_MMU_RAM_OFFSET   0x5c
+#define MMU_MMU_GFLUSH_OFFSET  0x60
+#define MMU_MMU_FLUSH_ENTRY_OFFSET  0x64
+/* Bitfield mask and offset declarations */
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK  0x18
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET  3
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK  0x1
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET   0
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET  0
+#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
+#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
+#define MMU_MMU_CNTL_MMU_ENABLE_MASK    0x2
+#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET   1
+#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
+#define MMU_MMU_LOCK_BASE_VALUE_OFFSET   10
+#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK   0x3f0
+#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET    4
+
+#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 0000000..ab1a16d
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
+/*
+ * MMURegAcM.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_REG_ACM_H
+#define _MMU_REG_ACM_H
+
+#include <linux/io.h>
+#include <EasiGlobal.h>
+
+#include "MMUAccInt.h"
+
+#if defined(USE_LEVEL_1_MACROS)
+
+#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
+
+#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
+    data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
+    new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
+    new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
+    data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
+    new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
+    new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
+
+#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
+
+#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
+      & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
+      MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
+      MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
+      MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CNTL_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
+    data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
+    new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
+    new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CNTL_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
+    data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
+    new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
+    new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
+
+#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_TTB_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
+
+#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+      MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
+      MMU_MMU_LOCK_BASE_VALUE_OFFSET))
+
+#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_WRITE32);\
+    data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
+    new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
+    new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+      MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
+      MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
+    data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
+    new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
+    new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
+      (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
+      (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
+      MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
+
+#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
+
+#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CAM_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_RAM_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#endif /* USE_LEVEL_1_MACROS */
+
+#endif /* _MMU_REG_ACM_H */
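
Every *_WRITE32 field macro above is the same read-modify-write pattern: read the register, clear the field, shift and mask the new value in, write back. An equivalent plain-C helper, shown only to make the pattern explicit (not part of the header):

	static inline void mmu_field_write32(void __iomem *base, u32 offset,
					     u32 mask, u32 shift, u32 value)
	{
		u32 reg = __raw_readl(base + offset);

		reg &= ~mask;				/* clear the field */
		reg |= (value << shift) & mask;		/* insert new value */
		__raw_writel(reg, base + offset);
	}

	/* MMUMMU_CNTLTWL_ENABLE_WRITE32(base, HW_SET) then corresponds to
	 * mmu_field_write32(base, MMU_MMU_CNTL_OFFSET,
	 *		     MMU_MMU_CNTL_TWL_ENABLE_MASK,
	 *		     MMU_MMU_CNTL_TWL_ENABLE_OFFSET, HW_SET); */
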
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 0000000..d5266d4
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
+/*
+ * hw_defs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global HW definitions
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_DEFS_H
+#define _HW_DEFS_H
+
+/* Page size */
+#define HW_PAGE_SIZE4KB   0x1000
+#define HW_PAGE_SIZE64KB  0x10000
+#define HW_PAGE_SIZE1MB   0x100000
+#define HW_PAGE_SIZE16MB  0x1000000
+
+/* hw_status:  return type for HW API */
+typedef long hw_status;
+
+/*  Macro used to set and clear any bit */
+#define HW_CLEAR	0
+#define HW_SET		1
+
+/* hw_endianism_t:  Enumerated Type used to specify the endianism
+ *		Do NOT change these values. They are used as bit fields. */
+enum hw_endianism_t {
+	HW_LITTLE_ENDIAN,
+	HW_BIG_ENDIAN
+};
+
+/* hw_element_size_t:  Enumerated Type used to specify the element size
+ *		Do NOT change these values. They are used as bit fields. */
+enum hw_element_size_t {
+	HW_ELEM_SIZE8BIT,
+	HW_ELEM_SIZE16BIT,
+	HW_ELEM_SIZE32BIT,
+	HW_ELEM_SIZE64BIT
+};
+
+/* hw_idle_mode_t:  Enumerated Type used to specify Idle modes */
+enum hw_idle_mode_t {
+	HW_FORCE_IDLE,
+	HW_NO_IDLE,
+	HW_SMART_IDLE
+};
+
+#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 0000000..014f5d5
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
+/*
+ * hw_mmu.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * API definitions to setup MMU TLB and PTE
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/io.h>
+#include "MMURegAcM.h"
+#include <hw_defs.h>
+#include <hw_mmu.h>
+#include <linux/types.h>
+#include <linux/err.h>
+
+#define MMU_BASE_VAL_MASK	0xFC00
+#define MMU_PAGE_MAX	     3
+#define MMU_ELEMENTSIZE_MAX      3
+#define MMU_ADDR_MASK	    0xFFFFF000
+#define MMU_TTB_MASK	     0xFFFFC000
+#define MMU_SECTION_ADDR_MASK    0xFFF00000
+#define MMU_SSECTION_ADDR_MASK   0xFF000000
+#define MMU_PAGE_TABLE_MASK      0xFFFFFC00
+#define MMU_LARGE_PAGE_MASK      0xFFFF0000
+#define MMU_SMALL_PAGE_MASK      0xFFFFF000
+
+#define MMU_LOAD_TLB	0x00000001
+#define MMU_GFLUSH	0x60
+
+/*
+ * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
+ */
+enum hw_mmu_page_size_t {
+	HW_MMU_SECTION,
+	HW_MMU_LARGE_PAGE,
+	HW_MMU_SMALL_PAGE,
+	HW_MMU_SUPERSECTION
+};
+
+/*
+ * FUNCTION	      : mmu_flush_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type		: const u32
+ *       Description     : Base Address of instance of MMU module
+ *
+ * RETURNS:
+ *
+ *       Type		: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A Pointer
+ *						Parameter was set to NULL
+ *
+ * PURPOSE:	      : Flush the TLB entry pointed to by the
+ *			lock counter register
+ *			even if this entry is set protected
+ *
+ * METHOD:	       : Check the Input parameter and Flush a
+ *			 single entry in the TLB.
+ */
+static hw_status mmu_flush_entry(const void __iomem *base_address);
+
+/*
+ * FUNCTION	      : mmu_set_cam_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type		: const u32
+ *       Description     : Base Address of instance of MMU module
+ *
+ *       Identifier      : page_sz
+ *       Type		: const u32
+ *       Description     : It indicates the page size
+ *
+ *       Identifier      : preserved_bit
+ *       Type		: const u32
+ *       Description     : It indicates whether the TLB entry is preserved
+ *							or not
+ *
+ *       Identifier      : valid_bit
+ *       Type		: const u32
+ *       Description     : It indicates whether the TLB entry is valid or not
+ *
+ *
+ *       Identifier      : virtual_addr_tag
+ *       Type	    	: const u32
+ *       Description     : virtual Address
+ *
+ * RETURNS:
+ *
+ *       Type	    	: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A Pointer Parameter
+ *						   was set to NULL
+ *			 RET_PARAM_OUT_OF_RANGE -- Input Parameter out
+ *						   of Range
+ *
+ * PURPOSE:	      	: Set MMU_CAM reg
+ *
+ * METHOD:	       	: Check the Input parameters and set the CAM entry.
+ */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+				   const u32 page_sz,
+				   const u32 preserved_bit,
+				   const u32 valid_bit,
+				   const u32 virtual_addr_tag);
+
+/*
+ * FUNCTION	      : mmu_set_ram_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type	    	: const u32
+ *       Description     : Base Address of instance of MMU module
+ *
+ *       Identifier      : physical_addr
+ *       Type	    	: const u32
+ *       Description     : Physical Address to which the corresponding
+ *			 virtual address should point
+ *
+ *       Identifier      : endianism
+ *       Type	    	: hw_endianism_t
+ *       Description     : endianism for the given page
+ *
+ *       Identifier      : element_size
+ *       Type	    	: hw_element_size_t
+ *       Description     : The element size (8, 16, 32 or 64 bit)
+ *
+ *       Identifier      : mixed_size
+ *       Type	    	: hw_mmu_mixed_size_t
+ *       Description     : Element Size to follow CPU or TLB
+ *
+ * RETURNS:
+ *
+ *       Type	    	: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A Pointer Parameter
+ *							was set to NULL
+ *			 RET_PARAM_OUT_OF_RANGE -- Input Parameter
+ *							out of Range
+ *
+ * PURPOSE:	      : Set MMU_RAM reg
+ *
+ * METHOD:	       : Check the Input parameters and set the RAM entry.
+ */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+				   const u32 physical_addr,
+				   enum hw_endianism_t endianism,
+				   enum hw_element_size_t element_size,
+				   enum hw_mmu_mixed_size_t mixed_size);
+
+/* HW FUNCTIONS */
+
+hw_status hw_mmu_enable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
+
+	return status;
+}
+
+hw_status hw_mmu_disable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+	return status;
+}
+
+hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+				u32 num_locked_entries)
+{
+	hw_status status = 0;
+
+	MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
+
+	return status;
+}
+
+hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+				u32 victim_entry_num)
+{
+	hw_status status = 0;
+
+	MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
+
+	return status;
+}
+
+hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+
+	MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+	u32 irq_reg;
+
+	irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+	MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+	u32 irq_reg;
+
+	irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+	MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
+{
+	hw_status status = 0;
+
+	*irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
+{
+	hw_status status = 0;
+
+	/* read values from register */
+	*addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
+{
+	hw_status status = 0;
+	u32 load_ttb;
+
+	load_ttb = ttb_phys_addr & ~0x7FUL;
+	/* write values to register */
+	MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
+
+	return status;
+}
+
+hw_status hw_mmu_twl_enable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
+
+	return status;
+}
+
+hw_status hw_mmu_twl_disable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+	return status;
+}
+
+hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
+			   u32 page_sz)
+{
+	hw_status status = 0;
+	u32 virtual_addr_tag;
+	enum hw_mmu_page_size_t pg_size_bits;
+
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		pg_size_bits = HW_MMU_SMALL_PAGE;
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		pg_size_bits = HW_MMU_LARGE_PAGE;
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		pg_size_bits = HW_MMU_SECTION;
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		pg_size_bits = HW_MMU_SUPERSECTION;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Generate the 20-bit tag from virtual address */
+	virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+	mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
+
+	mmu_flush_entry(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+			 u32 physical_addr,
+			 u32 virtual_addr,
+			 u32 page_sz,
+			 u32 entry_num,
+			 struct hw_mmu_map_attrs_t *map_attrs,
+			 s8 preserved_bit, s8 valid_bit)
+{
+	hw_status status = 0;
+	u32 lock_reg;
+	u32 virtual_addr_tag;
+	enum hw_mmu_page_size_t mmu_pg_size;
+
+	/* Check the input parameters */
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		mmu_pg_size = HW_MMU_SMALL_PAGE;
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		mmu_pg_size = HW_MMU_LARGE_PAGE;
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		mmu_pg_size = HW_MMU_SECTION;
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		mmu_pg_size = HW_MMU_SUPERSECTION;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
+
+	/* Generate the 20-bit tag from virtual address */
+	virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+	/* Write the fields in the CAM Entry Register */
+	mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
+			  virtual_addr_tag);
+
+	/* Write the different fields of the RAM Entry Register */
+	/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
+	mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
+			  map_attrs->element_size, map_attrs->mixed_size);
+
+	/* Update the MMU Lock Register */
+	/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
+	MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
+
+	/* Enable loading of an entry in TLB by writing 1
+	   into LD_TLB_REG register */
+	MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
+
+	MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
+
+	return status;
+}
+
+hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+			 u32 physical_addr,
+			 u32 virtual_addr,
+			 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
+{
+	hw_status status = 0;
+	u32 pte_addr, pte_val;
+	s32 num_entries = 1;
+
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SMALL_PAGE_MASK);
+		pte_val =
+		    ((physical_addr & MMU_SMALL_PAGE_MASK) |
+		     (map_attrs->endianism << 9) | (map_attrs->
+						    element_size << 4) |
+		     (map_attrs->mixed_size << 11) | 2);
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_LARGE_PAGE_MASK);
+		pte_val =
+		    ((physical_addr & MMU_LARGE_PAGE_MASK) |
+		     (map_attrs->endianism << 9) | (map_attrs->
+						    element_size << 4) |
+		     (map_attrs->mixed_size << 11) | 1);
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		pte_val =
+		    ((((physical_addr & MMU_SECTION_ADDR_MASK) |
+		       (map_attrs->endianism << 15) | (map_attrs->
+						       element_size << 10) |
+		       (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SSECTION_ADDR_MASK);
+		pte_val =
+		    (((physical_addr & MMU_SSECTION_ADDR_MASK) |
+		      (map_attrs->endianism << 15) | (map_attrs->
+						      element_size << 10) |
+		      (map_attrs->mixed_size << 17)
+		     ) | 0x40000 | 0x2);
+		break;
+
+	case HW_MMU_COARSE_PAGE_SIZE:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	while (--num_entries >= 0)
+		((u32 *) pte_addr)[num_entries] = pte_val;
+
+	return status;
+}
+
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
+{
+	hw_status status = 0;
+	u32 pte_addr;
+	s32 num_entries = 1;
+
+	switch (page_size) {
+	case HW_PAGE_SIZE4KB:
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SMALL_PAGE_MASK);
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_LARGE_PAGE_MASK);
+		break;
+
+	case HW_PAGE_SIZE1MB:
+	case HW_MMU_COARSE_PAGE_SIZE:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SSECTION_ADDR_MASK);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	while (--num_entries >= 0)
+		((u32 *) pte_addr)[num_entries] = 0;
+
+	return status;
+}
+
+/* mmu_flush_entry */
+static hw_status mmu_flush_entry(const void __iomem *base_address)
+{
+	hw_status status = 0;
+	u32 flush_entry_data = 0x1;
+
+	/* write values to register */
+	MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
+
+	return status;
+}
+
+/* mmu_set_cam_entry */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+				   const u32 page_sz,
+				   const u32 preserved_bit,
+				   const u32 valid_bit,
+				   const u32 virtual_addr_tag)
+{
+	hw_status status = 0;
+	u32 mmu_cam_reg;
+
+	mmu_cam_reg = (virtual_addr_tag << 12);
+	mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
+	    (preserved_bit << 3);
+
+	/* write values to register */
+	MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
+
+	return status;
+}
+
+/* mmu_set_ram_entry */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+				   const u32 physical_addr,
+				   enum hw_endianism_t endianism,
+				   enum hw_element_size_t element_size,
+				   enum hw_mmu_mixed_size_t mixed_size)
+{
+	hw_status status = 0;
+	u32 mmu_ram_reg;
+
+	mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
+	mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
+				       (mixed_size << 6));
+
+	/* write values to register */
+	MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
+
+	return status;
+}
+
+void hw_mmu_tlb_flush_all(const void __iomem *base)
+{
+	__raw_writeb(1, base + MMU_GFLUSH);
+}
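
hw_mmu_tlb_add() above is a fixed register sequence: save MMU_LOCK, program MMU_CAM (tag, size, valid/preserved bits) and MMU_RAM (physical address plus attributes), point the victim at the target entry, pulse MMU_LD_TLB, then restore MMU_LOCK. A typical call, modeled on the one in mmu_fault_print_stack(); dmmu_base, pa and da are assumptions here:

	struct hw_mmu_map_attrs_t attrs = {
		.endianism    = HW_LITTLE_ENDIAN,
		.element_size = HW_ELEM_SIZE32BIT,
		.mixed_size   = HW_MMU_CPUES,
	};

	/* pin a 4KB mapping of pa at DSP address da into TLB entry 0,
	 * marked preserved and valid */
	hw_mmu_tlb_add(dmmu_base, pa, da, HW_PAGE_SIZE4KB, 0,
		       &attrs, HW_SET, HW_SET);
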
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 0000000..1458a2c
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
+/*
+ * hw_mmu.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * MMU types and API declarations
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_MMU_H
+#define _HW_MMU_H
+
+#include <linux/types.h>
+
+/* Bitmasks for interrupt sources */
+#define HW_MMU_TRANSLATION_FAULT   0x2
+#define HW_MMU_ALL_INTERRUPTS      0x1F
+
+#define HW_MMU_COARSE_PAGE_SIZE 0x400
+
+/* hw_mmu_mixed_size_t:  Enumerated Type used to specify whether to follow
+			CPU/TLB Element size */
+enum hw_mmu_mixed_size_t {
+	HW_MMU_TLBES,
+	HW_MMU_CPUES
+};
+
+/* hw_mmu_map_attrs_t:  Struct containing MMU mapping attributes */
+struct hw_mmu_map_attrs_t {
+	enum hw_endianism_t endianism;
+	enum hw_element_size_t element_size;
+	enum hw_mmu_mixed_size_t mixed_size;
+	bool donotlockmpupage;
+};
+
+extern hw_status hw_mmu_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+				       u32 num_locked_entries);
+
+extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+				       u32 victim_entry_num);
+
+/* For MMU faults */
+extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
+				  u32 irq_mask);
+
+extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
+				      u32 irq_mask);
+
+extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
+				     u32 irq_mask);
+
+extern hw_status hw_mmu_event_status(const void __iomem *base_address,
+				     u32 *irq_mask);
+
+extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
+					u32 *addr);
+
+/* Set the TT base address */
+extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
+				u32 ttb_phys_addr);
+
+extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
+				  u32 virtual_addr, u32 page_sz);
+
+extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+				u32 physical_addr,
+				u32 virtual_addr,
+				u32 page_sz,
+				u32 entry_num,
+				struct hw_mmu_map_attrs_t *map_attrs,
+				s8 preserved_bit, s8 valid_bit);
+
+/* For PTEs */
+extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+				u32 physical_addr,
+				u32 virtual_addr,
+				u32 page_sz,
+				struct hw_mmu_map_attrs_t *map_attrs);
+
+extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+				  u32 virtual_addr, u32 page_size);
+
+void hw_mmu_tlb_flush_all(const void __iomem *base);
+
+static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
+{
+	u32 pte_addr;
+	u32 va31_to20;
+
+	va31_to20 = va >> (20 - 2);	/* bits 31:20, already shifted left by 2 */
+	va31_to20 &= 0xFFFFFFFCUL;
+	pte_addr = l1_base + va31_to20;
+
+	return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
+{
+	u32 pte_addr;
+
+	pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
+
+	return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
+{
+	u32 pte_coarse;
+
+	pte_coarse = pte_val & 0xFFFFFC00;
+
+	return pte_coarse;
+}
+
+static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
+{
+	u32 pte_size = 0;
+
+	if ((pte_val & 0x3) == 0x1) {
+		/* Points to L2 PT */
+		pte_size = HW_MMU_COARSE_PAGE_SIZE;
+	}
+
+	if ((pte_val & 0x3) == 0x2) {
+		if (pte_val & (1 << 18))
+			pte_size = HW_PAGE_SIZE16MB;
+		else
+			pte_size = HW_PAGE_SIZE1MB;
+	}
+
+	return pte_size;
+}
+
+static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
+{
+	u32 pte_size = 0;
+
+	if (pte_val & 0x2)
+		pte_size = HW_PAGE_SIZE4KB;
+	else if (pte_val & 0x1)
+		pte_size = HW_PAGE_SIZE64KB;
+
+	return pte_size;
+}
+
+#endif /* _HW_MMU_H */
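
The inline helpers at the end of this header are enough to walk the two-level table in software, which is what pte_update() relies on. A short sketch; l1_base must be the kernel virtual address of the L1 table and va the DSP virtual address, and pa_to_va() is a hypothetical PA-to-VA conversion standing in for the l2_base_pa/l2_base_va bookkeeping pte_update() does:

	u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_base, va);

	switch (hw_mmu_pte_size_l1(l1_pte)) {
	case HW_MMU_COARSE_PAGE_SIZE: {
		/* L1 entry holds the physical base of an L2 (coarse)
		 * table; convert to a kernel VA before dereferencing */
		u32 l2_pa = hw_mmu_pte_coarse_l1(l1_pte);
		u32 l2_va = pa_to_va(l2_pa);		/* hypothetical */
		u32 l2_pte = *(u32 *)hw_mmu_pte_addr_l2(l2_va, va);

		/* hw_mmu_pte_size_l2(l2_pte) is 4KB, 64KB or 0 (invalid) */
		break;
	}
	case HW_PAGE_SIZE1MB:
	case HW_PAGE_SIZE16MB:
		/* direct section / supersection mapping */
		break;
	default:
		/* 0: invalid L1 entry */
		break;
	}
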
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cc..38122db 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@
 	void __iomem *dw_per_base;
 	u32 dw_per_pm_base;
 	u32 dw_core_pm_base;
+	void __iomem *dw_dmmu_base;
 	void __iomem *dw_sys_ctrl_base;
 };
 
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f..357458f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
 #include <dspbridge/nodedefs.h>
 #include <dspbridge/dispdefs.h>
 #include <dspbridge/dspdefs.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/host_os.h>
 
 /*  ----------------------------------- This */
@@ -233,6 +234,29 @@
 				  struct cmm_object **mgr);
 
 /*
+ *  ======== dev_get_dmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the dynamic memory manager created for this
+ *      device.
+ *  Parameters:
+ *      hdev_obj:     Handle to device object created with
+ *                      dev_create_device().
+ *      *mgr:           Ptr to location to store handle.
+ *  Returns:
+ *      0:        Success.
+ *      -EFAULT:    Invalid hdev_obj.
+ *  Requires:
+ *      mgr != NULL.
+ *      DEV Initialized.
+ *  Ensures:
+ *      0:        *mgr contains a handle to a dynamic memory manager
+ *                      object, or NULL.
+ *      else:           *mgr is NULL.
+ */
+extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+				  struct dmm_object **mgr);
+
+/*
  *  ======== dev_get_cod_mgr ========
  *  Purpose:
  *      Retrieve the COD manager create for this device.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 0000000..6c58335
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
+/*
+ * dmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DMM_
+#define DMM_
+
+#include <dspbridge/dbdefs.h>
+
+struct dmm_object;
+
+/* DMM attributes used in dmm_create() */
+struct dmm_mgrattrs {
+	u32 reserved;
+};
+
+#define DMMPOOLSIZE      0x4000000
+
+/*
+ *  ======== dmm_get_handle ========
+ *  Purpose:
+ *      Return the dynamic memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+
+extern int dmm_get_handle(void *hprocessor,
+				 struct dmm_object **dmm_manager);
+
+extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
+				     u32 size, u32 *prsv_addr);
+
+extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
+					u32 rsv_addr);
+
+extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
+				 u32 size);
+
+extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
+				    u32 addr, u32 *psize);
+
+extern int dmm_destroy(struct dmm_object *dmm_mgr);
+
+extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
+
+extern int dmm_create(struct dmm_object **dmm_manager,
+			     struct dev_object *hdev_obj,
+			     const struct dmm_mgrattrs *mgr_attrts);
+
+extern bool dmm_init(void);
+
+extern void dmm_exit(void);
+
+extern int dmm_create_tables(struct dmm_object *dmm_mgr,
+				    u32 addr, u32 size);
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
+#endif
+
+#endif /* DMM_ */
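
A plausible lifecycle for this API, in the reserve, map, unmap, unreserve order the rest of the patch assumes; dmm_mgr and the 64KB size are illustrative:

	u32 rsv_addr, unmapped_size;

	/* carve a 64KB hole out of the DSP virtual pool */
	if (!dmm_reserve_memory(dmm_mgr, 0x10000, &rsv_addr)) {
		/* mark the hole as mapped */
		dmm_map_memory(dmm_mgr, rsv_addr, 0x10000);

		/* tear down in reverse order */
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmapped_size);
		dmm_un_reserve_memory(dmm_mgr, rsv_addr);
	}
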
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b..c1f363e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@
 	struct bridge_dma_map_info dma_info;
 };
 
+/* Used for DMM reserved memory accounting */
+struct dmm_rsv_object {
+	struct list_head link;
+	u32 dsp_reserved_addr;
+};
+
 /* New structure (member of process context) abstracts DMM resource info */
 struct dspheap_res_object {
 	s32 heap_allocated;	/* DMM status */
@@ -159,6 +165,10 @@
 	struct list_head dmm_map_list;
 	spinlock_t dmm_map_lock;
 
+	/* DMM reserved memory resources */
+	struct list_head dmm_rsv_list;
+	spinlock_t dmm_rsv_lock;
+
 	/* DSP Heap resources */
 	struct dspheap_res_object *pdspheap_list;
 
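
dmm_rsv_list/dmm_rsv_lock above give each process context its own ledger of reserved DSP regions, parallel to the existing dmm_map_list. A sketch of how one reservation would be recorded under the lock; pr_ctxt and rsv_addr are assumptions, mirroring the dmm_map_list pattern:

	struct dmm_rsv_object *rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);

	if (rsv) {
		rsv->dsp_reserved_addr = rsv_addr;
		spin_lock(&pr_ctxt->dmm_rsv_lock);
		list_add(&rsv->link, &pr_ctxt->dmm_rsv_list);
		spin_unlock(&pr_ctxt->dmm_rsv_lock);
	}
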
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4c..0000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * dsp-mmu.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * DSP iommu.
- *
- * Copyright (C) 2005-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _DSP_MMU_
-#define _DSP_MMU_
-
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-
-/**
- * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
- *
- * This function initialize dsp mmu module and returns a struct iommu
- * handle to use it for dsp maps.
- *
- */
-struct iommu *dsp_mmu_init(void);
-
-/**
- * dsp_mmu_exit() - destroy dsp mmu module
- * @mmu:	Pointer to iommu handle.
- *
- * This function destroys dsp mmu module.
- *
- */
-void dsp_mmu_exit(struct iommu *mmu);
-
-/**
- * user_to_dsp_map() - maps user to dsp virtual address
- * @mmu:	Pointer to iommu handle.
- * @uva:		Virtual user space address.
- * @da		DSP address
- * @size		Buffer size to map.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- * This function maps a user space buffer into DSP virtual address.
- *
- */
-u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
-						struct page **usr_pgs);
-
-/**
- * user_to_dsp_unmap() - unmaps DSP virtual buffer.
- * @mmu:	Pointer to iommu handle.
- * @da		DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-int user_to_dsp_unmap(struct iommu *mmu, u32 da);
-
-#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 6153634..0ae7d16 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@
 				       u32 mem_type);
 
 /*
+ *  ======== bridge_brd_mem_map ========
+ *  Purpose:
+ *      Map an MPU memory region to a DSP/IVA memory space
+ *  Parameters:
+ *      dev_ctxt:    Handle to Bridge driver defined device info.
+ *      ul_mpu_addr:      MPU memory region start address.
+ *      virt_addr:      DSP/IVA memory region u8 address.
+ *      ul_num_bytes:     Number of bytes to map.
+ *      map_attrs:       Mapping attributes (e.g. endianness).
+ *  Returns:
+ *      0:        Success.
+ *      -EPERM:      Other, unspecified error.
+ *  Requires:
+ *      dev_ctxt != NULL;
+ *  Ensures:
+ */
+typedef int(*fxn_brd_memmap) (struct bridge_dev_context
+				     * dev_ctxt, u32 ul_mpu_addr,
+				     u32 virt_addr, u32 ul_num_bytes,
+				     u32 map_attr,
+				     struct page **mapped_pages);
+
+/*
+ *  ======== bridge_brd_mem_un_map ========
+ *  Purpose:
+ *      Unmap an MPU memory region from DSP/IVA memory space
+ *  Parameters:
+ *      dev_ctxt:    Handle to Bridge driver defined device info.
+ *      virt_addr:      DSP/IVA memory region u8 address.
+ *      ul_num_bytes:     Number of bytes to unmap.
+ *  Returns:
+ *      0:        Success.
+ *      -EPERM:      Other, unspecified error.
+ *  Requires:
+ *      dev_ctxt != NULL;
+ *  Ensures:
+ */
+typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
+				       * dev_ctxt,
+				       u32 virt_addr, u32 ul_num_bytes);
+
+/*
  *  ======== bridge_brd_stop ========
  *  Purpose:
  *      Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@
 	fxn_brd_setstate pfn_brd_set_state;	/* Sets the Board State */
 	fxn_brd_memcopy pfn_brd_mem_copy;	/* Copies DSP Memory */
 	fxn_brd_memwrite pfn_brd_mem_write;	/* Write DSP Memory w/o halt */
+	fxn_brd_memmap pfn_brd_mem_map;	/* Maps MPU mem to DSP mem */
+	fxn_brd_memunmap pfn_brd_mem_un_map;	/* Unmaps MPU mem from DSP mem */
 	fxn_chnl_create pfn_chnl_create;	/* Create channel manager. */
 	fxn_chnl_destroy pfn_chnl_destroy;	/* Destroy channel manager. */
 	fxn_chnl_open pfn_chnl_open;	/* Create a new channel. */
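
Both typedefs slot into struct bridge_drv_interface shown in the hunk above; a hypothetical excerpt of a driver's interface table wiring them up (designated initializers used here for clarity only, the driver itself uses positional initialization):

	static struct bridge_drv_interface drv_interface_fxns = {
		/* ... */
		.pfn_brd_mem_map    = bridge_brd_mem_map,
		.pfn_brd_mem_un_map = bridge_brd_mem_un_map,
		/* ... */
	};
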
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad1801..41e0594 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
 #ifndef DSPIOCTL_
 #define DSPIOCTL_
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /*
  * Any IOCTLS at or above this value are reserved for standard Bridge driver
  * interfaces.
@@ -61,6 +65,9 @@
 	/* GPP virtual address. __va does not work for ioremapped addresses */
 	u32 ul_gpp_va;
 	u32 ul_size;		/* Size of the mapped memory in bytes */
+	enum hw_endianism_t endianism;
+	enum hw_mmu_mixed_size_t mixed_mode;
+	enum hw_element_size_t elem_size;
 };
 
 #endif /* DSPIOCTL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab..5e09fd1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@
 			   struct process_context *pr_ctxt);
 
 /*
+ *  ======== proc_reserve_memory ========
+ *  Purpose:
+ *      Reserve a virtually contiguous region of DSP address space.
+ *  Parameters:
+ *      hprocessor      :   The processor handle.
+ *      ul_size	  :   Size of the address space to reserve.
+ *      pp_rsv_addr       :   Ptr to DSP side reserved u8 address.
+ *  Returns:
+ *      0	 :   Success.
+ *      -EFAULT     :   Invalid processor handle.
+ *      -EPERM       :   General failure.
+ *      -ENOMEM     :   Cannot reserve chunk of this size.
+ *  Requires:
+ *      pp_rsv_addr is not NULL
+ *      PROC Initialized.
+ *  Ensures:
+ *  Details:
+ */
+extern int proc_reserve_memory(void *hprocessor,
+				      u32 ul_size, void **pp_rsv_addr,
+				      struct process_context *pr_ctxt);
+
+/*
  *  ======== proc_un_map ========
  *  Purpose:
  *      Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@
 extern int proc_un_map(void *hprocessor, void *map_addr,
 			      struct process_context *pr_ctxt);
 
+/*
+ *  ======== proc_un_reserve_memory ========
+ *  Purpose:
+ *      Frees a previously reserved region of DSP address space.
+ *  Parameters:
+ *      hprocessor      :   The processor handle.
+ *      prsv_addr	:   Ptr to DSP side reserved u8 address.
+ *  Returns:
+ *      0	 :   Success.
+ *      -EFAULT     :   Invalid processor handle.
+ *      -EPERM       :   General failure.
+ *      -ENOENT   :   Cannot find a reserved region starting with this
+ *			  address.
+ *  Requires:
+ *      prsv_addr is not NULL
+ *      PROC Initialized.
+ *  Ensures:
+ *  Details:
+ */
+extern int proc_un_reserve_memory(void *hprocessor,
+					 void *prsv_addr,
+					 struct process_context *pr_ctxt);
+
 #endif /* PROC_ */
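
A hypothetical caller of the pair declared above; hprocessor and pr_ctxt are assumed to be valid handles:

	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hprocessor, 0x4000, &rsv_addr, pr_ctxt);
	if (!status) {
		/* ... hand rsv_addr to the DSP side ... */
		proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
	}
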
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267..132e960 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
 #include <dspbridge/cod.h>
 #include <dspbridge/drv.h>
 #include <dspbridge/proc.h>
+#include <dspbridge/dmm.h>
 
 /*  ----------------------------------- Resource Manager */
 #include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@
 	struct msg_mgr *hmsg_mgr;	/* Message manager. */
 	struct io_mgr *hio_mgr;	/* IO manager (CHNL, msg_ctrl) */
 	struct cmm_object *hcmm_mgr;	/* SM memory manager. */
+	struct dmm_object *dmm_mgr;	/* Dynamic memory manager. */
 	struct ldr_module *module_obj;	/* Bridge Module handle. */
 	u32 word_size;		/* DSP word size: quick access. */
 	struct drv_object *hdrv_obj;	/* Driver Object */
@@ -248,6 +250,9 @@
 			/* Instantiate the DEH module */
 			status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
 		}
+		/* Create DMM mgr. */
+		status = dmm_create(&dev_obj->dmm_mgr,
+				    (struct dev_object *)dev_obj, NULL);
 	}
 	/* Add the new DEV_Object to the global list: */
 	if (!status) {
@@ -273,6 +278,8 @@
 			kfree(dev_obj->proc_list);
 			if (dev_obj->cod_mgr)
 				cod_delete(dev_obj->cod_mgr);
+			if (dev_obj->dmm_mgr)
+				dmm_destroy(dev_obj->dmm_mgr);
 			kfree(dev_obj);
 		}
 
@@ -382,6 +389,11 @@
 			dev_obj->hcmm_mgr = NULL;
 		}
 
+		if (dev_obj->dmm_mgr) {
+			dmm_destroy(dev_obj->dmm_mgr);
+			dev_obj->dmm_mgr = NULL;
+		}
+
 		/* Call the driver's bridge_dev_destroy() function: */
 		/* Require of DevDestroy */
 		if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@
 }
 
 /*
+ *  ======== dev_get_dmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the dynamic memory manager created for this
+ *      device.
+ */
+int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+			   struct dmm_object **mgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(mgr != NULL);
+
+	if (hdev_obj) {
+		*mgr = dev_obj->dmm_mgr;
+	} else {
+		*mgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
+	return status;
+}
+
+/*
  *  ======== dev_get_cod_mgr ========
  *  Purpose:
  *      Retrieve the COD manager create for this device.
@@ -713,8 +751,10 @@
 
 	refs--;
 
-	if (refs == 0)
+	if (refs == 0) {
 		cmm_exit();
+		dmm_exit();
+	}
 
 	DBC_ENSURE(refs >= 0);
 }
@@ -726,12 +766,25 @@
  */
 bool dev_init(void)
 {
-	bool ret = true;
+	bool cmm_ret, dmm_ret, ret = true;
 
 	DBC_REQUIRE(refs >= 0);
 
-	if (refs == 0)
-		ret = cmm_init();
+	if (refs == 0) {
+		cmm_ret = cmm_init();
+		dmm_ret = dmm_init();
+
+		ret = cmm_ret && dmm_ret;
+
+		if (!ret) {
+			if (cmm_ret)
+				cmm_exit();
+
+			if (dmm_ret)
+				dmm_exit();
+
+		}
+	}
 
 	if (ret)
 		refs++;
@@ -1065,6 +1118,8 @@
 		STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
 		STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
 		STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
+		STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
+		STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
 		STORE_FXN(fxn_chnl_create, pfn_chnl_create);
 		STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
 		STORE_FXN(fxn_chnl_open, pfn_chnl_open);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 0000000..8685233
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
+/*
+ * dmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region
+ *
+ * Notes:
+ *   Region: Generic memory entity having a start address and a size
+ *   Chunk:  Reserved region
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dmm.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+#define DMM_ADDR_VIRTUAL(a) \
+	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
+	dyn_mem_map_beg)
+#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
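+
+/*
+ * Worked example (assuming dyn_mem_map_beg == 0x20000000):
+ *   DMM_ADDR_TO_INDEX(0x20003000) == 3 (the fourth 4K page), and
+ *   DMM_ADDR_VIRTUAL(&virtual_mapping_table[3]) == 0x20003000.
+ */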
+
+/* DMM Mgr */
+struct dmm_object {
+	/* dmm_lock serializes access to the memory manager across
+	 * threads. */
+	spinlock_t dmm_lock;
+};
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* module reference count */
+struct map_page {
+	u32 region_size:15;
+	u32 mapped_size:15;
+	u32 reserved:1;
+	u32 mapped:1;
+};
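+
+/* The four bitfields pack into a single 32-bit word; region_size and
+ * mapped_size are counted in 4K pages, so one entry can describe at
+ * most (2^15 - 1) pages, i.e. just under 128 MB. */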
+
+/*  Create the free list */
+static struct map_page *virtual_mapping_table;
+static u32 free_region;		/* The index of free region */
+static u32 free_size;
+static u32 dyn_mem_map_beg;	/* The Beginning of dynamic memory mapping */
+static u32 table_size;		/* The size of virt and phys pages tables */
+
+/*  ----------------------------------- Function Prototypes */
+static struct map_page *get_region(u32 addr);
+static struct map_page *get_free_region(u32 len);
+static struct map_page *get_mapped_region(u32 addrs);
+
+/*  ======== dmm_create_tables ========
+ *  Purpose:
+ *      Create the table that holds the physical addresses of the
+ *      buffer pages passed by the user, and the table that tracks
+ *      the virtual memory reserved for the DSP.
+ */
+int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	status = dmm_delete_tables(dmm_obj);
+	if (!status) {
+		dyn_mem_map_beg = addr;
+		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
+		/*  Create the free list */
+		virtual_mapping_table = __vmalloc(table_size *
+				sizeof(struct map_page), GFP_KERNEL |
+				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+		if (virtual_mapping_table == NULL)
+			status = -ENOMEM;
+		else {
+			/* On successful allocation,
+			 * all entries are zero ('free') */
+			free_region = 0;
+			free_size = table_size * PG_SIZE4K;
+			virtual_mapping_table[0].region_size = table_size;
+		}
+	}
+
+	if (status)
+		pr_err("%s: failure, status 0x%x\n", __func__, status);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_create ========
+ *  Purpose:
+ *      Create a dynamic memory manager object.
+ */
+int dmm_create(struct dmm_object **dmm_manager,
+		      struct dev_object *hdev_obj,
+		      const struct dmm_mgrattrs *mgr_attrts)
+{
+	struct dmm_object *dmm_obj = NULL;
+	int status = 0;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(dmm_manager != NULL);
+
+	*dmm_manager = NULL;
+	/* create, zero, and tag a dmm mgr object */
+	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
+	if (dmm_obj != NULL) {
+		spin_lock_init(&dmm_obj->dmm_lock);
+		*dmm_manager = dmm_obj;
+	} else {
+		status = -ENOMEM;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dmm_destroy ========
+ *  Purpose:
+ *      Release the dynamic memory manager resources.
+ */
+int dmm_destroy(struct dmm_object *dmm_mgr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	if (dmm_mgr) {
+		status = dmm_delete_tables(dmm_obj);
+		if (!status)
+			kfree(dmm_obj);
+	} else
+		status = -EFAULT;
+
+	return status;
+}
+
+/*
+ *  ======== dmm_delete_tables ========
+ *  Purpose:
+ *      Delete DMM Tables.
+ */
+int dmm_delete_tables(struct dmm_object *dmm_mgr)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	/* Delete all DMM tables */
+	if (dmm_mgr)
+		vfree(virtual_mapping_table);
+	else
+		status = -EFAULT;
+	return status;
+}
+
+/*
+ *  ======== dmm_exit ========
+ *  Purpose:
+ *      Discontinue usage of module; free resources when reference count
+ *      reaches 0.
+ */
+void dmm_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+}
+
+/*
+ *  ======== dmm_get_handle ========
+ *  Purpose:
+ *      Return the dynamic memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
+{
+	int status = 0;
+	struct dev_object *hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(dmm_manager != NULL);
+	if (hprocessor != NULL)
+		status = proc_get_dev_object(hprocessor, &hdev_obj);
+	else
+		hdev_obj = dev_get_first();	/* default */
+
+	if (!status)
+		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_init ========
+ *  Purpose:
+ *      Initializes private state of DMM module.
+ */
+bool dmm_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	virtual_mapping_table = NULL;
+	table_size = 0;
+
+	return ret;
+}
+
+/*
+ *  ======== dmm_map_memory ========
+ *  Purpose:
+ *      Add a mapping block to the reserved chunk. DMM assumes that this block
+ *  will be mapped in the DSP/IVA's address space. DMM returns an error if a
+ *  mapping overlaps another one. This function stores the info that will be
+ *  required later while unmapping the block.
+ */
+int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	/* Find the Reserved memory chunk containing the DSP block to
+	 * be mapped */
+	chunk = (struct map_page *)get_region(addr);
+	if (chunk != NULL) {
+		/* Mark the region 'mapped', leave the 'reserved' info as-is */
+		chunk->mapped = true;
+		chunk->mapped_size = (size / PG_SIZE4K);
+	} else
+		status = -ENOENT;
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
+		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_reserve_memory ========
+ *  Purpose:
+ *      Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
+int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
+			      u32 *prsv_addr)
+{
+	int status = 0;
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *node;
+	u32 rsv_addr = 0;
+	u32 rsv_size = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Try to get a DSP chunk from the free list */
+	node = get_free_region(size);
+	if (node != NULL) {
+		/*  DSP chunk of given size is available. */
+		rsv_addr = DMM_ADDR_VIRTUAL(node);
+		/* Calculate the number of entries to use */
+		rsv_size = size / PG_SIZE4K;
+		if (rsv_size < node->region_size) {
+			/* Mark remainder of free region */
+			node[rsv_size].mapped = false;
+			node[rsv_size].reserved = false;
+			node[rsv_size].region_size =
+			    node->region_size - rsv_size;
+			node[rsv_size].mapped_size = 0;
+		}
+		/* get_free_region() returns a first-fit chunk, but only
+		 * the requested portion is used. */
+		node->mapped = false;
+		node->reserved = true;
+		node->region_size = rsv_size;
+		node->mapped_size = 0;
+		/* Return the chunk's starting address */
+		*prsv_addr = rsv_addr;
+	} else
+		/* DSP chunk of given size is not available */
+		status = -ENOMEM;
+
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
+		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
+		prsv_addr, status, rsv_addr, rsv_size);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_map_memory ========
+ *  Purpose:
+ *      Remove the mapped block from the reserved chunk.
+ */
+int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	chunk = get_mapped_region(addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (!status) {
+		/* Unmap the region */
+		*psize = chunk->mapped_size * PG_SIZE4K;
+		chunk->mapped = false;
+		chunk->mapped_size = 0;
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
+		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_reserve_memory ========
+ *  Purpose:
+ *      Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	u32 i;
+	int status = 0;
+	u32 chunk_size;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Find the chunk containing the reserved address */
+	chunk = get_mapped_region(rsv_addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (!status) {
+		/* Free all the mapped pages for this reserved region */
+		i = 0;
+		while (i < chunk->region_size) {
+			if (chunk[i].mapped) {
+				/* Remove mapping from the page tables. */
+				chunk_size = chunk[i].mapped_size;
+				/* Clear the mapping flags */
+				chunk[i].mapped = false;
+				chunk[i].mapped_size = 0;
+				i += chunk_size;
+			} else
+				i++;
+		}
+		/* Clear the flags (mark the region 'free') */
+		chunk->reserved = false;
+		/* NOTE: We do NOT coalesce free regions here.
+		 * Free regions are coalesced in get_free_region(), as it
+		 * traverses the whole mapping table.
+		 */
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
+		__func__, dmm_mgr, rsv_addr, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== get_region ========
+ *  Purpose:
+ *      Returns the mapping table region containing the specified address
+ */
+static struct map_page *get_region(u32 addr)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+
+	if (virtual_mapping_table != NULL) {
+		/* find page mapped by this address */
+		i = DMM_ADDR_TO_INDEX(addr);
+		if (i < table_size)
+			curr_region = virtual_mapping_table + i;
+	}
+
+	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
+		__func__, curr_region, free_region, free_size);
+	return curr_region;
+}
+
+/*
+ *  ======== get_free_region ========
+ *  Purpose:
+ *      Returns a free region of at least the requested length
+ */
+static struct map_page *get_free_region(u32 len)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+	u32 region_size = 0;
+	u32 next_i = 0;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+	if (len > free_size) {
+		/* Find the largest free region
+		 * (coalesce during the traversal) */
+		while (i < table_size) {
+			region_size = virtual_mapping_table[i].region_size;
+			next_i = i + region_size;
+			if (virtual_mapping_table[i].reserved == false) {
+				/* Coalesce, if possible */
+				if (next_i < table_size &&
+				    virtual_mapping_table[next_i].reserved
+				    == false) {
+					virtual_mapping_table[i].region_size +=
+					    virtual_mapping_table
+					    [next_i].region_size;
+					continue;
+				}
+				region_size *= PG_SIZE4K;
+				if (region_size > free_size) {
+					free_region = i;
+					free_size = region_size;
+				}
+			}
+			i = next_i;
+		}
+	}
+	if (len <= free_size) {
+		curr_region = virtual_mapping_table + free_region;
+		free_region += (len / PG_SIZE4K);
+		free_size -= len;
+	}
+	return curr_region;
+}
+
+/*
+ *  ======== get_mapped_region ========
+ *  Purpose:
+ *      Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 addrs)
+{
+	u32 i = 0;
+	struct map_page *curr_region = NULL;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+
+	i = DMM_ADDR_TO_INDEX(addrs);
+	if (i < table_size && (virtual_mapping_table[i].mapped ||
+			       virtual_mapping_table[i].reserved))
+		curr_region = virtual_mapping_table + i;
+	return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
+{
+	struct map_page *curr_node = NULL;
+	u32 i;
+	u32 freemem = 0;
+	u32 bigsize = 0;
+
+	spin_lock(&dmm_mgr->dmm_lock);
+
+	if (virtual_mapping_table != NULL) {
+		for (i = 0; i < table_size; i +=
+		     virtual_mapping_table[i].region_size) {
+			curr_node = virtual_mapping_table + i;
+			if (curr_node->reserved) {
+				/*printk("RESERVED size = 0x%x, "
+				   "Map size = 0x%x\n",
+				   (curr_node->region_size * PG_SIZE4K),
+				   (curr_node->mapped == false) ? 0 :
+				   (curr_node->mapped_size * PG_SIZE4K));
+				 */
+			} else {
+/*				printk("UNRESERVED size = 0x%x\n",
+					(curr_node->region_size * PG_SIZE4K));
+ */
+				freemem += (curr_node->region_size * PG_SIZE4K);
+				if (curr_node->region_size > bigsize)
+					bigsize = curr_node->region_size;
+			}
+		}
+	}
+	spin_unlock(&dmm_mgr->dmm_lock);
+	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+	       freemem / (1024 * 1024));
+	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
+	       ((table_size * PG_SIZE4K) - freemem) / (1024 * 1024));
+	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
+	       (bigsize * PG_SIZE4K / (1024 * 1024)));
+
+	return 0;
+}
+#endif
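
Taken together, the new DMM entry points follow a create/reserve/map lifecycle. A hedged sketch of the intended call order (error handling elided; hdev_obj, dsp_va_base and pool_size are placeholders, not values from this patch):

	struct dmm_object *dmm;
	u32 rsv = 0, unmap_size = 0;

	dmm_create(&dmm, hdev_obj, NULL);
	dmm_create_tables(dmm, dsp_va_base, pool_size);

	dmm_reserve_memory(dmm, 2 * PG_SIZE4K, &rsv); /* reserve a chunk */
	dmm_map_memory(dmm, rsv, PG_SIZE4K);          /* mark part of it mapped */

	dmm_un_map_memory(dmm, rsv, &unmap_size);     /* returns the mapped size */
	dmm_un_reserve_memory(dmm, rsv);              /* release the chunk */

	dmm_destroy(dmm);                             /* also deletes the tables */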
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551c..86ca785 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@
 /*
  * ======== procwrap_reserve_memory ========
  */
-u32 __deprecated procwrap_reserve_memory(union trapped_args *args,
-							void *pr_ctxt)
+u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
 {
-	return 0;
+	int status;
+	void *prsv_addr;
+	void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
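+	/* Reject a zero-length request or one that is not a whole
+	 * number of 4K pages: e.g. 0x1800 fails (0x1800 & 0xfff != 0)
+	 * while 0x2000 passes. */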
+	if ((args->args_proc_rsvmem.ul_size <= 0) ||
+	    (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
+		return -EINVAL;
+
+	status = proc_reserve_memory(hprocessor,
+				     args->args_proc_rsvmem.ul_size, &prsv_addr,
+				     pr_ctxt);
+	if (!status) {
+		if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
+			status = -EINVAL;
+			proc_un_reserve_memory(args->args_proc_rsvmem.
+					       hprocessor, prsv_addr, pr_ctxt);
+		}
+	}
+	return status;
 }
 
 /*
@@ -1025,10 +1042,15 @@
 /*
  * ======== procwrap_un_reserve_memory ========
  */
-u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args,
-							void *pr_ctxt)
+u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
 {
-	return 0;
+	int status;
+	void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+	status = proc_un_reserve_memory(hprocessor,
+					args->args_proc_unrsvmem.prsv_addr,
+					pr_ctxt);
+	return status;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc1685..81b1b90 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@
 	struct process_context *ctxt = (struct process_context *)process_ctxt;
 	int status = 0;
 	struct dmm_map_object *temp_map, *map_obj;
+	struct dmm_rsv_object *temp_rsv, *rsv_obj;
 
 	/* Free DMM mapped memory resources */
 	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@
 			pr_err("%s: proc_un_map failed!"
 			       " status = 0x%xn", __func__, status);
 	}
+
+	/* Free DMM reserved memory resources */
+	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
+		status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
+						rsv_obj->dsp_reserved_addr,
+						ctxt);
+		if (status)
+			pr_err("%s: proc_un_reserve_memory failed!"
+			       " status = 0x%x\n", __func__, status);
+	}
 	return status;
 }
 
@@ -732,6 +743,7 @@
 	host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
 	dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
 	dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
+	dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
 
 	/* for 24xx the base port does not map the memory for DSP
 	 * internal memory; TODO: do an ioremap here */
@@ -785,6 +797,8 @@
 							 OMAP_PER_PRM_SIZE);
 		host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
 							  OMAP_CORE_PRM_SIZE);
+		host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
+						 OMAP_DMMU_SIZE);
 
 		dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
 			host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@
 			host_res->dw_mem_base[3]);
 		dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
 			host_res->dw_mem_base[4]);
+		dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
 
 		shm_size = drv_datap->shm_size;
 		if (shm_size >= 0x10000) {
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43f..324fcdf 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@
 		pr_ctxt->res_state = PROC_RES_ALLOCATED;
 		spin_lock_init(&pr_ctxt->dmm_map_lock);
 		INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+		spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+		INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
 
 		pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
 		if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247..1562f3c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
 /*  ----------------------------------- This */
 #include <dspbridge/nodepriv.h>
 #include <dspbridge/node.h>
+#include <dspbridge/dmm.h>
 
 /* Static/Dynamic Loader includes */
 #include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@
 	u32 mapped_addr = 0;
 	u32 map_attrs = 0x0;
 	struct dsp_processorstate proc_state;
+#ifdef DSP_DMM_DEBUG
+	struct dmm_object *dmm_mgr;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+#endif
 
 	void *node_res;
 
@@ -425,12 +430,34 @@
 	if (status)
 		goto func_cont;
 
+	status = proc_reserve_memory(hprocessor,
+				     pnode->create_args.asa.task_arg_obj.
+				     heap_size + PAGE_SIZE,
+				     (void **)&(pnode->create_args.asa.
+					task_arg_obj.udsp_heap_res_addr),
+				     pr_ctxt);
+	if (status) {
+		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
+		       __func__, status);
+		goto func_cont;
+	}
+#ifdef DSP_DMM_DEBUG
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = DSP_EHANDLE;
+		goto func_cont;
+	}
+
+	dmm_mem_map_dump(dmm_mgr);
+#endif
+
 	map_attrs |= DSP_MAPLITTLEENDIAN;
 	map_attrs |= DSP_MAPELEMSIZE32;
 	map_attrs |= DSP_MAPVIRTUALADDR;
 	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
 			  pnode->create_args.asa.task_arg_obj.heap_size,
-			  NULL, (void **)&mapped_addr, map_attrs,
+			  (void *)pnode->create_args.asa.task_arg_obj.
+			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
 			  pr_ctxt);
 	if (status)
 		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@
 	struct stream_chnl stream;
 	struct node_msgargs node_msg_args;
 	struct node_taskargs task_arg_obj;
-
+#ifdef DSP_DMM_DEBUG
+	struct dmm_object *dmm_mgr;
+	struct proc_object *p_proc_object =
+	    (struct proc_object *)hnode->hprocessor;
+#endif
 	int status;
 	if (!hnode)
 		goto func_end;
@@ -2545,6 +2576,19 @@
 			status = proc_un_map(hnode->hprocessor, (void *)
 					     task_arg_obj.udsp_heap_addr,
 					     pr_ctxt);
+
+			status = proc_un_reserve_memory(hnode->hprocessor,
+							(void *)
+							task_arg_obj.
+							udsp_heap_res_addr,
+							pr_ctxt);
+#ifdef DSP_DMM_DEBUG
+			status = dmm_get_handle(p_proc_object, &dmm_mgr);
+			if (dmm_mgr)
+				dmm_mem_map_dump(dmm_mgr);
+			else
+				status = DSP_EHANDLE;
+#endif
 		}
 	}
 	if (node_type != NODE_MESSAGE) {
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02..b47d7aa 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
 #include <dspbridge/cod.h>
 #include <dspbridge/dev.h>
 #include <dspbridge/procpriv.h>
+#include <dspbridge/dmm.h>
 
 /*  ----------------------------------- Resource Manager */
 #include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
 #include <dspbridge/msg.h>
 #include <dspbridge/dspioctl.h>
 #include <dspbridge/drv.h>
-#include <_tiomap.h>
 
 /*  ----------------------------------- This */
 #include <dspbridge/proc.h>
@@ -151,21 +151,34 @@
 	return map_obj;
 }
 
+static int match_exact_map_obj(struct dmm_map_object *map_obj,
+					u32 dsp_addr, u32 size)
+{
+	if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
+		pr_err("%s: addr matches (0x%x) but size doesn't (0x%x != 0x%x)\n",
+				__func__, dsp_addr, map_obj->size, size);
+
+	return map_obj->dsp_addr == dsp_addr &&
+		map_obj->size == size;
+}
+
 static void remove_mapping_information(struct process_context *pr_ctxt,
-						u32 dsp_addr)
+						u32 dsp_addr, u32 size)
 {
 	struct dmm_map_object *map_obj;
 
-	pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr);
+	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
+							dsp_addr, size);
 
 	spin_lock(&pr_ctxt->dmm_map_lock);
 	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
-		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n",
+		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
 							__func__,
 							map_obj->mpu_addr,
-							map_obj->dsp_addr);
+							map_obj->dsp_addr,
+							map_obj->size);
 
-		if (map_obj->dsp_addr == dsp_addr) {
+		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
 			pr_debug("%s: match, deleting map info\n", __func__);
 			list_del(&map_obj->link);
 			kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@
 	s32 cnew_envp;		/* "  " in new_envp[] */
 	s32 nproc_id = 0;	/* Anticipate MP version. */
 	struct dcd_manager *hdcd_handle;
+	struct dmm_object *dmm_mgr;
 	u32 dw_ext_end;
 	u32 proc_id;
 	int brd_state;
@@ -1267,6 +1281,25 @@
 			if (!status)
 				status = cod_get_sym_value(cod_mgr, EXTEND,
 							   &dw_ext_end);
+
+			/* Reset DMM structs and add an initial free chunk */
+			if (!status) {
+				status =
+				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
+						    &dmm_mgr);
+				if (dmm_mgr) {
+					/* Set dw_ext_end to the DMM
+					 * start byte address */
+					dw_ext_end =
+					    (dw_ext_end + 1) * DSPWORDSIZE;
+					/* DMM memory is from EXT_END */
+					status = dmm_create_tables(dmm_mgr,
+								   dw_ext_end,
+								   DMMPOOLSIZE);
+				} else {
+					status = -EFAULT;
+				}
+			}
 		}
 	}
 	/* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@
 {
 	u32 va_align;
 	u32 pa_align;
+	struct dmm_object *dmm_mgr;
 	u32 size_align;
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 	struct dmm_map_object *map_obj;
+	u32 tmp_addr = 0;
 
 #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
 	if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@
 	}
 	/* Critical section */
 	mutex_lock(&proc_lock);
+	dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (dmm_mgr)
+		status = dmm_map_memory(dmm_mgr, va_align, size_align);
+	else
+		status = -EFAULT;
 
 	/* Add mapping to the page tables. */
 	if (!status) {
+
+		/* Mapped address = MSB of VA | LSB of PA */
+		tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
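+		/* e.g. (hypothetical values) va_align 0x20001000 and
+		 * pmpu_addr 0x4801f2c0 give tmp_addr 0x200012c0: the page
+		 * comes from the DSP VA, the offset from the MPU buffer. */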
 		/* mapped memory resource tracking */
-		map_obj = add_mapping_info(pr_ctxt, pa_align, va_align,
+		map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
 						size_align);
-		if (!map_obj) {
+		if (!map_obj)
 			status = -ENOMEM;
-		} else {
-			va_align = user_to_dsp_map(
-				p_proc_object->hbridge_context->dsp_mmu,
-				pa_align, va_align, size_align,
-				map_obj->pages);
-			if (IS_ERR_VALUE(va_align))
-				status = (int)va_align;
-		}
+		else
+			status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
+			    (p_proc_object->hbridge_context, pa_align, va_align,
+			     size_align, ul_map_attr, map_obj->pages);
 	}
 	if (!status) {
 		/* Mapped address = MSB of VA | LSB of PA */
-		map_obj->dsp_addr = (va_align |
-					((u32)pmpu_addr & (PG_SIZE4K - 1)));
-		*pp_map_addr = (void *)map_obj->dsp_addr;
+		*pp_map_addr = (void *) tmp_addr;
 	} else {
-		remove_mapping_information(pr_ctxt, va_align);
+		remove_mapping_information(pr_ctxt, tmp_addr, size_align);
+		dmm_un_map_memory(dmm_mgr, va_align, &size_align);
 	}
 	mutex_unlock(&proc_lock);
 
@@ -1463,6 +1501,55 @@
 }
 
 /*
+ *  ======== proc_reserve_memory ========
+ *  Purpose:
+ *      Reserve a virtually contiguous region of DSP address space.
+ */
+int proc_reserve_memory(void *hprocessor, u32 ul_size,
+			       void **pp_rsv_addr,
+			       struct process_context *pr_ctxt)
+{
+	struct dmm_object *dmm_mgr;
+	int status = 0;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_rsv_object *rsv_obj;
+
+	if (!p_proc_object) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
+	if (status != 0)
+		goto func_end;
+
+	/*
+	 * A successful reserve should be followed by insertion of rsv_obj
+	 * into dmm_rsv_list, so that reserved memory resource tracking
+	 * remains uptodate
+	 */
+	rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
+	if (rsv_obj) {
+		rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
+		spin_lock(&pr_ctxt->dmm_rsv_lock);
+		list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
+		spin_unlock(&pr_ctxt->dmm_rsv_lock);
+	}
+
+func_end:
+	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
+		"status 0x%x\n", __func__, hprocessor,
+		ul_size, pp_rsv_addr, status);
+	return status;
+}
+
+/*
  *  ======== proc_start ========
  *  Purpose:
  *      Start a processor running.
@@ -1610,7 +1697,9 @@
 {
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_object *dmm_mgr;
 	u32 va_align;
+	u32 size_align;
 
 	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
 	if (!p_proc_object) {
@@ -1618,11 +1707,24 @@
 		goto func_end;
 	}
 
+	status = dmm_get_handle(hprocessor, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
 	/* Critical section */
 	mutex_lock(&proc_lock);
+	/*
+	 * Update the DMM structures and get the size to unmap.
+	 * dmm_un_map_memory() returns an error if the VA is not mapped.
+	 */
+	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
 	/* Remove mapping from the page tables. */
-	status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu,
-								va_align);
+	if (!status) {
+		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
+		    (p_proc_object->hbridge_context, va_align, size_align);
+	}
 
 	mutex_unlock(&proc_lock);
 	if (status)
@@ -1633,7 +1735,7 @@
 	 * from dmm_map_list, so that mapped memory resource tracking
 	 * remains uptodate
 	 */
-	remove_mapping_information(pr_ctxt, (u32) map_addr);
+	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
 
 func_end:
 	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@
 }
 
 /*
+ *  ======== proc_un_reserve_memory ========
+ *  Purpose:
+ *      Frees a previously reserved region of DSP address space.
+ */
+int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
+				  struct process_context *pr_ctxt)
+{
+	struct dmm_object *dmm_mgr;
+	int status = 0;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_rsv_object *rsv_obj;
+
+	if (!p_proc_object) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
+	if (status != 0)
+		goto func_end;
+
+	/*
+	 * A successful unreserve should be followed by removal of rsv_obj
+	 * from dmm_rsv_list, so that reserved memory resource tracking
+	 * remains uptodate
+	 */
+	spin_lock(&pr_ctxt->dmm_rsv_lock);
+	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
+		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
+			list_del(&rsv_obj->link);
+			kfree(rsv_obj);
+			break;
+		}
+	}
+	spin_unlock(&pr_ctxt->dmm_rsv_lock);
+
+func_end:
+	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
+		__func__, hprocessor, prsv_addr, status);
+	return status;
+}
+
+/*
  *  ======== = proc_monitor ======== ==
  *  Purpose:
  *      Place the Processor in Monitor State. This is an internal
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e84..fed2510 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@
 
 		struct fb_deferred_io *fbdefio;
 
-		fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io));
+		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
 		if (fbdefio) {
 			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d..7cc3d24 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@
 
 		{
 			char essid[IW_ESSID_MAX_SIZE+1];
-			if (wrq->u.essid.pointer)
+			if (wrq->u.essid.pointer) {
 				rc = iwctl_giwessid(dev, NULL,
 						    &(wrq->u.essid), essid);
 				if (copy_to_user(wrq->u.essid.pointer,
 						         essid,
 						         wrq->u.essid.length) )
 					rc = -EFAULT;
+			}
 		}
 		break;
 
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a21970..7777d9a 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@
 	 */
 	bus_mask   = 0;
 	media_mask = 0;
-	media_mask = 0;
 	for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
 		for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
 			if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5..6a71f52 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@
 }
 
 int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr,
+		   u8 key_index, bool pairwise, const u8 *mac_addr,
 		   struct key_params *params)
 {
 	wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@
 }
 
 int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr, void *cookie,
+		   u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
 		   void (*callback)(void *cookie, struct key_params*))
 {
 	wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@
 }
 
 int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr)
+		   u8 key_index, bool pairwise, const u8 *mac_addr)
 {
 	wlandevice_t *wlandev = dev->ml_priv;
 	u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8..b7b4a73 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@
 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
 			return -EFAULT;
 		return 0;
-	}
 #endif
+	}
 
 	return -EOPNOTSUPP;
 }
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 04ef3ef..81b46585 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@
 		if (msg->len < 128)
 			*--dp = (msg->len << 1) | EA;
 		else {
-			*--dp = (msg->len >> 6) | EA;
-			*--dp = (msg->len & 127) << 1;
+			*--dp = ((msg->len & 127) << 1) | EA;
+			*--dp = (msg->len >> 6) & 0xfe;
 		}
 	}
 
@@ -2375,6 +2375,7 @@
 	gsm->mru = c->mru;
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
+	gsm->n2 = c->n2;
 
 	if (c->i == 1)
 		gsm->ftype = UIH;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cc1e985..d8210ca 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@
 	spin_lock_irqsave(&tty->buf.lock, flags);
 
 	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
-		struct tty_buffer *head;
+		struct tty_buffer *head, *tail = tty->buf.tail;
+		int seen_tail = 0;
 		while ((head = tty->buf.head) != NULL) {
 			int count;
 			char *char_buf;
@@ -423,6 +424,15 @@
 			if (!count) {
 				if (head->next == NULL)
 					break;
+				/*
+				 * A new buffer might be added to the tty
+				 * during the unlock window below, and we
+				 * could end up spinning here forever,
+				 * hogging the CPU. To avoid that, take a
+				 * break each time we process the tail
+				 * buffer.
+				 */
+				if (tail == head)
+					seen_tail = 1;
 				tty->buf.head = head->next;
 				tty_buffer_free(tty, head);
 				continue;
@@ -432,7 +442,7 @@
 			   line discipline as we want to empty the queue */
 			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
 				break;
-			if (!tty->receive_room) {
+			if (!tty->receive_room || seen_tail) {
 				schedule_delayed_work(&tty->buf.work, 1);
 				break;
 			}
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 412f977..d8e96b0 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
 
 static DEFINE_SPINLOCK(tty_ldisc_lock);
 static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
 /* Line disc dispatch table */
 static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
 
@@ -83,6 +84,7 @@
 		return;
 	}
 	local_irq_restore(flags);
+	wake_up(&tty_ldisc_idle);
 }
 
 /**
@@ -531,6 +533,23 @@
 }
 
 /**
+ *	tty_ldisc_wait_idle	-	wait for the ldisc to become idle
+ *	@tty: tty to wait for
+ *
+ *	Wait for the line discipline to become idle. The discipline must
+ *	have been halted for this to guarantee it remains idle.
+ */
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+	int ret;
+	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
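+	/* wait_event_interruptible_timeout() returns -ERESTARTSYS if
+	 * interrupted, 0 on timeout and the remaining jiffies once the
+	 * ldisc is idle; map that to an errno / -EBUSY / 0 below. */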
+	if (ret < 0)
+		return ret;
+	return ret > 0 ? 0 : -EBUSY;
+}
+
+/**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
  *	@ldisc: the line discipline
@@ -634,8 +653,17 @@
 
 	flush_scheduled_work();
 
+	retval = tty_ldisc_wait_idle(tty);
+
 	tty_lock();
 	mutex_lock(&tty->ldisc_mutex);
+
+	/* handle wait idle failure locked */
+	if (retval) {
+		tty_ldisc_put(new_ldisc);
+		goto enable;
+	}
+
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
 		/* We were raced by the hangup method. It will have stomped
 		   the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@
 
 	tty_ldisc_put(o_ldisc);
 
+enable:
 	/*
 	 *	Allow ldisc referencing to occur again
 	 */
@@ -714,9 +743,12 @@
  *	state closed
  */
 
-static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
 {
-	struct tty_ldisc *ld;
+	struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+
+	if (IS_ERR(ld))
+		return -1;
 
 	tty_ldisc_close(tty, tty->ldisc);
 	tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@
 	/*
 	 *	Switch the line discipline back
 	 */
-	ld = tty_ldisc_get(ldisc);
-	BUG_ON(IS_ERR(ld));
 	tty_ldisc_assign(tty, ld);
 	tty_set_termios_ldisc(tty, ldisc);
+
+	return 0;
 }
 
 /**
@@ -802,13 +834,16 @@
 	   a FIXME */
 	if (tty->ldisc) {	/* Not yet closed */
 		if (reset == 0) {
-			tty_ldisc_reinit(tty, tty->termios->c_line);
-			err = tty_ldisc_open(tty, tty->ldisc);
+
+			if (!tty_ldisc_reinit(tty, tty->termios->c_line))
+				err = tty_ldisc_open(tty, tty->ldisc);
+			else
+				err = 1;
 		}
 		/* If the re-open fails or we reset then go to N_TTY. The
 		   N_TTY open cannot fail */
 		if (reset || err) {
-			tty_ldisc_reinit(tty, N_TTY);
+			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
 			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
 		}
 		tty_ldisc_enable(tty);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 273ab44..eab3a1f 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@
 vcs_poll(struct file *file, poll_table *wait)
 {
 	struct vcs_poll_data *poll = vcs_poll_data_get(file);
-	int ret = 0;
+	int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
 
 	if (poll) {
 		poll_wait(file, &poll->waitq, wait);
-		if (!poll->seen_last_update)
-			ret = POLLIN | POLLRDNORM;
+		if (poll->seen_last_update)
+			ret = DEFAULT_POLLMASK;
 	}
 	return ret;
 }
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6..045bb4b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@
 
 static int proc_connectinfo(struct dev_state *ps, void __user *arg)
 {
-	struct usbdevfs_connectinfo ci;
+	struct usbdevfs_connectinfo ci = {
+		.devnum = ps->dev->devnum,
+		.slow = ps->dev->speed == USB_SPEED_LOW
+	};
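+	/* Initializing ci at its declaration leaves no field
+	 * uninitialized before the structure is copied to userspace. */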
 
-	ci.devnum = ps->dev->devnum;
-	ci.slow = ps->dev->speed == USB_SPEED_LOW;
 	if (copy_to_user(arg, &ci, sizeof(ci)))
 		return -EFAULT;
 	return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca8..607d0db 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@
 	boolean "Freescale Highspeed USB DR Peripheral Controller"
 	depends on FSL_SOC || ARCH_MXC
 	select USB_GADGET_DUALSPEED
-	select USB_FSL_MPH_DR_OF
+	select USB_FSL_MPH_DR_OF if OF
 	help
 	   Some of Freescale PowerPC processors have a High Speed
 	   Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb23..e7e0c69 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@
 					got_region:1,
 					req_config:1,
 					configured:1,
-					enabled:1;
+					enabled:1,
+					registered:1;
 
 	/* pci state used to access those endpoints */
 	struct pci_dev			*pdev;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354..40f7716 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	int read_started;
+	int read_allocated;
 	struct list_head	read_queue;
 	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
+	int write_started;
+	int write_allocated;
 	struct gs_buf		port_write_buf;
 	wait_queue_head_t	drain_wait;	/* wait while writes drain */
 
@@ -363,6 +367,9 @@
 		struct usb_request	*req;
 		int			len;
 
+		if (port->write_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		len = gs_send_packet(port, req->buf, in->maxpacket);
 		if (len == 0) {
@@ -397,6 +404,8 @@
 			break;
 		}
 
+		port->write_started++;
+
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
@@ -418,7 +427,6 @@
 {
 	struct list_head	*pool = &port->read_pool;
 	struct usb_ep		*out = port->port_usb->out;
-	unsigned		started = 0;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
@@ -430,6 +438,9 @@
 		if (!tty)
 			break;
 
+		if (port->read_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		list_del(&req->list);
 		req->length = out->maxpacket;
@@ -447,13 +458,13 @@
 			list_add(&req->list, pool);
 			break;
 		}
-		started++;
+		port->read_started++;
 
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
 	}
-	return started;
+	return port->read_started;
 }
 
 /*
@@ -535,6 +546,7 @@
 		}
 recycle:
 		list_move(&req->list, &port->read_pool);
+		port->read_started--;
 	}
 
 	/* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@
 
 	spin_lock(&port->port_lock);
 	list_add(&req->list, &port->write_pool);
+	port->write_started--;
 
 	switch (req->status) {
 	default:
@@ -608,7 +621,8 @@
 	spin_unlock(&port->port_lock);
 }
 
-static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
 {
 	struct usb_request	*req;
 
@@ -616,25 +630,31 @@
 		req = list_entry(head->next, struct usb_request, list);
 		list_del(&req->list);
 		gs_free_req(ep, req);
+		if (allocated)
+			(*allocated)--;
 	}
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-		void (*fn)(struct usb_ep *, struct usb_request *))
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
 {
 	int			i;
 	struct usb_request	*req;
+	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
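+	/* Only top up to QUEUE_SIZE: requests that survived an earlier
+	 * disconnect stay allocated, so allocate just the shortfall. */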
 
 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
 	 * do quite that many this time, don't fail ... we just won't
 	 * be as speedy as we might otherwise be.
 	 */
-	for (i = 0; i < QUEUE_SIZE; i++) {
+	for (i = 0; i < n; i++) {
 		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
 		if (!req)
 			return list_empty(head) ? -ENOMEM : 0;
 		req->complete = fn;
 		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
 	}
 	return 0;
 }
@@ -661,14 +681,15 @@
 	 * configurations may use different endpoints with a given port;
 	 * and high speed vs full speed changes packet sizes too.
 	 */
-	status = gs_alloc_requests(ep, head, gs_read_complete);
+	status = gs_alloc_requests(ep, head, gs_read_complete,
+		&port->read_allocated);
 	if (status)
 		return status;
 
 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-			gs_write_complete);
+			gs_write_complete, &port->write_allocated);
 	if (status) {
-		gs_free_requests(ep, head);
+		gs_free_requests(ep, head, &port->read_allocated);
 		return status;
 	}
 
@@ -680,8 +701,9 @@
 	if (started) {
 		tty_wakeup(port->port_tty);
 	} else {
-		gs_free_requests(ep, head);
-		gs_free_requests(port->port_usb->in, &port->write_pool);
+		gs_free_requests(ep, head, &port->read_allocated);
+		gs_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
 		status = -EIO;
 	}
 
@@ -1315,8 +1337,12 @@
 	spin_lock_irqsave(&port->port_lock, flags);
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
-	gs_free_requests(gser->out, &port->read_pool);
-	gs_free_requests(gser->out, &port->read_queue);
-	gs_free_requests(gser->in, &port->write_pool);
+	gs_free_requests(gser->out, &port->read_pool, NULL);
+	gs_free_requests(gser->out, &port->read_queue, NULL);
+	gs_free_requests(gser->in, &port->write_pool, NULL);
+
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c39..6f4f8e6 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@
 	bool "Support for Freescale on-chip EHCI USB controller"
 	depends on USB_EHCI_HCD && FSL_SOC
 	select USB_EHCI_ROOT_HUB_TT
-	select USB_FSL_MPH_DR_OF
+	select USB_FSL_MPH_DR_OF if OF
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
 
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7..bce8505 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@
 static int ehci_mxc_setup(struct usb_hcd *hcd)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct device *dev = hcd->self.controller;
+	struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
 	int retval;
 
 	/* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@
 
 	ehci_reset(ehci);
 
+	/* set up the PORTSCx register */
+	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
+
+	/* is this really needed? */
+	msleep(10);
+
 	ehci_port_power(ehci, 0);
 	return 0;
 }
@@ -114,7 +122,7 @@
 	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
 	struct usb_hcd *hcd;
 	struct resource *res;
-	int irq, ret, temp;
+	int irq, ret;
 	struct ehci_mxc_priv *priv;
 	struct device *dev = &pdev->dev;
 
@@ -188,10 +196,6 @@
 		clk_enable(priv->ahbclk);
 	}
 
-	/* set up the PORTSCx register */
-	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
-	mdelay(10);
-
 	/* setup specific usb hw */
 	ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
 	if (ret < 0)
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872f..931d588 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@
 	},
 };
 
-MODULE_ALIAS("platfrom:jz4740-ohci");
+MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 3756641..c9078e4 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@
 			/* needed for power consumption */
 			struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
 
+			memset(&info, 0, sizeof(info));
 			/* directly from the descriptor */
 			info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
 			info.product = dev->product_id;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e9..dd573ab 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@
 #else
 			x.sisusb_conactive  = 0;
 #endif
+			memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
 
 			if (copy_to_user((void __user *)arg, &x, sizeof(x)))
 				retval = -EFAULT;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d2..fcb5206 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@
 	}
 
 	/* Start sampling ID pin, when plug is removed from MUSB */
-	if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
-		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+	if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
+		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
+		(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
 		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
 		musb->a_wait_bcon = TIMER_DELAY;
 	}
@@ -323,30 +324,8 @@
 	return -EIO;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static void musb_platform_reg_init(struct musb *musb)
 {
-
-	/*
-	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
-	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
-	 * be low for DEVICE mode and high for HOST mode. We set it high
-	 * here because we are in host mode
-	 */
-
-	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
-		printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
-			musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-	gpio_direction_output(musb->config->gpio_vrsel, 0);
-
-	usb_nop_xceiv_register();
-	musb->xceiv = otg_get_transceiver();
-	if (!musb->xceiv) {
-		gpio_free(musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-
 	if (ANOMALY_05000346) {
 		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
 		SSYNC();
@@ -358,7 +337,8 @@
 	}
 
 	/* Configure PLL oscillator register */
-	bfin_write_USB_PLLOSC_CTRL(0x30a8);
+	bfin_write_USB_PLLOSC_CTRL(0x3080 |
+			((480/musb->config->clkin) << 1));
 	SSYNC();
 
 	bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@
 				EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
 				EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
 	SSYNC();
+}
+
+int __init musb_platform_init(struct musb *musb, void *board_data)
+{
+
+	/*
+	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+	 * be low for DEVICE mode and high for HOST mode. We set it high
+	 * here because we are in host mode
+	 */
+
+	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+		printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n",
+			musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+	gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv) {
+		gpio_free(musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+
+	musb_platform_reg_init(musb);
 
 	if (is_host_enabled(musb)) {
 		musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	if (is_host_active(musb))
+		/*
+		 * During hibernation gpio_vrsel changes from high to
+		 * low, which generates a wakeup event that resumes the
+		 * system immediately. Set it to 0 before hibernating to
+		 * avoid this wakeup event.
+		 */
+		gpio_set_value(musb->config->gpio_vrsel, 0);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	musb_platform_reg_init(musb);
+}
+#endif
+
 int musb_platform_exit(struct musb *musb)
 {
 	gpio_free(musb->config->gpio_vrsel);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024..e6669fc 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@
 	if (int_usb & MUSB_INTR_SESSREQ) {
 		void __iomem *mbase = musb->mregs;
 
-		if (devctl & MUSB_DEVCTL_BDEVICE) {
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
 			DBG(3, "SessReq while on B state\n");
 			return IRQ_HANDLED;
 		}
@@ -1052,6 +1053,11 @@
 		clk_put(musb->clock);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
+	if (!is_otg_enabled(musb) && is_host_enabled(musb))
+		usb_remove_hcd(musb_to_hcd(musb));
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+
 	/* FIXME power down */
 }
 
@@ -2244,13 +2250,6 @@
 	 */
 	musb_exit_debugfs(musb);
 	musb_shutdown(pdev);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	if (musb->board_mode == MUSB_HOST)
-		usb_remove_hcd(musb_to_hcd(musb));
-#endif
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-	musb_platform_exit(musb);
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 
 	musb_free(musb);
 	iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@
 	unsigned long	flags;
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@
 
 	musb_save_context(musb);
 
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 0);
-	else
-		clk_disable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 0);
+		else
+			clk_disable(musb->clock);
+	}
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return 0;
 }
@@ -2441,13 +2439,12 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 1);
-	else
-		clk_enable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 1);
+		else
+			clk_enable(musb->clock);
+	}
 
 	musb_restore_context(musb);
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5..febaabc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@
 };
 
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-    defined(CONFIG_ARCH_OMAP4)
+    defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
 extern void musb_platform_save_context(struct musb *musb,
 		struct musb_context_registers *musb_context);
 extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d81504..36cfd06 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@
 	 */
 
 				csr |= MUSB_RXCSR_DMAENAB;
-				if (!musb_ep->hb_mult &&
-					musb_ep->hw_ep->rx_double_buffered)
-					csr |= MUSB_RXCSR_AUTOCLEAR;
 #ifdef USE_MODE1
+				csr |= MUSB_RXCSR_AUTOCLEAR;
 				/* csr |= MUSB_RXCSR_DMAMODE; */
 
 				/* this special sequence (enabling and then
@@ -656,6 +654,10 @@
 				 */
 				musb_writew(epio, MUSB_RXCSR,
 					csr | MUSB_RXCSR_DMAMODE);
+#else
+				if (!musb_ep->hb_mult &&
+					musb_ep->hw_ep->rx_double_buffered)
+					csr |= MUSB_RXCSR_AUTOCLEAR;
 #endif
 				musb_writew(epio, MUSB_RXCSR, csr);
 
@@ -807,7 +809,7 @@
 
 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 		/* Autoclear doesn't clear RxPktRdy for short packets */
-		if ((dma->desired_mode == 0)
+		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
 				|| (dma->actual_len
 					& (musb_ep->packet_sz - 1))) {
 			/* ack the read! */
@@ -818,8 +820,16 @@
 		/* incomplete, and not short? wait for next IN packet */
 		if ((request->actual < request->length)
 				&& (musb_ep->dma->actual_len
-					== musb_ep->packet_sz))
+					== musb_ep->packet_sz)) {
+			/* In the double-buffered case, continue to unload
+			 * the FIFO if another Rx packet is ready.
+			 */
+			csr = musb_readw(epio, MUSB_RXCSR);
+			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+				hw_ep->rx_double_buffered)
+				goto exit;
 			return;
+		}
 #endif
 		musb_g_giveback(musb_ep, request, 0);
 
@@ -827,7 +837,7 @@
 		if (!request)
 			return;
 	}
-
+exit:
 	/* Analyze request */
 	rxstate(musb, to_musb_request(request));
 }
@@ -916,13 +926,9 @@
 		 * likewise high bandwidth periodic tx
 		 */
 		/* Set TXMAXP with the FIFO size of the endpoint
-		 * to disable double buffering mode. Currently, It seems that double
-		 * buffering has problem if musb RTL revision number < 2.0.
+		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
-		else
-			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
-		else
-			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1166,8 +1169,6 @@
 						: DMA_FROM_DEVICE);
 			request->mapped = 0;
 		}
-	} else if (!req->buf) {
-		return -ENODATA;
 	} else
 		request->mapped = 0;
 
@@ -1695,8 +1696,10 @@
 	musb_platform_try_idle(musb, 0);
 
 	status = device_register(&musb->g.dev);
-	if (status != 0)
+	if (status != 0) {
+		put_device(&musb->g.dev);
 		the_gadget = NULL;
+	}
 	return status;
 }
 
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 2442675..5a727c5 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@
 	return 0;
 }
 
-static inline void  musb_read_txhubport(void __iomem *mbase, u8 epnum)
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 #endif /* CONFIG_BLACKFIN */
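The musb_regs.h hunk is a prototype fix: the CONFIG_BLACKFIN stub was declared void while the non-stub implementation returns u8, so a caller that consumes the result would not build on Blackfin. Illustrative caller, not from the patch:

	u8 hubport = musb_read_txhubport(mbase, epnum);	/* requires a u8 return */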
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af..563114d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@
 				dma_addr_t dma_addr, u32 len)
 {
 	struct musb_dma_channel *musb_channel = channel->private_data;
+	struct musb_dma_controller *controller = musb_channel->controller;
+	struct musb *musb = controller->private_data;
 
 	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
 		musb_channel->epnum,
@@ -167,6 +169,18 @@
 	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
 		channel->status == MUSB_DMA_STATUS_BUSY);
 
+	/*
+	 * The DMA engine in RTL 1.8 and above cannot handle
+	 * DMA addresses that are not aligned to a 4-byte boundary:
+	 * it masks off the last two bits of the address programmed
+	 * into DMA_ADDR.
+	 *
+	 * Fail such DMA transfers, so that the backup PIO mode
+	 * can carry out the transfer instead.
+	 */
+	if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
+		return false;
+
 	channel->actual_len = 0;
 	musb_channel->start_addr = dma_addr;
 	musb_channel->len = len;
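The guard added to musbhsdma.c can be exercised in isolation. A minimal sketch, assuming the hardware-version field compares numerically the way the driver's hwvers does (all names below are the sketch's own, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_HWVERS_1800	0x1800	/* assumed encoding of RTL 1.8 */

/* RTL >= 1.8 masks the low two bits written to DMA_ADDR, so an
 * unaligned buffer must be refused here and handed to the PIO path
 * rather than silently transferred from the wrong address. */
static bool dma_addr_ok(uint16_t hwvers, uint32_t dma_addr)
{
	return !(hwvers >= SKETCH_HWVERS_1800 && (dma_addr & 0x3));
}

int main(void)
{
	printf("%d\n", dma_addr_ok(0x1800, 0x10000002));	/* 0: fall back to PIO */
	printf("%d\n", dma_addr_ok(0x1400, 0x10000002));	/* 1: older RTL copes */
	return 0;
}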
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a58..76f8b35 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1..263f625 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,9 @@
 #define FTDI_SCIENCESCOPE_LOGBOOKML_PID		0xFF18
 #define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID	0xFF1C
 #define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID	0xFF1D
+
+/*
+ * Milkymist One JTAG/Serial
+ */
+#define QIHARDWARE_VID			0x20B7
+#define MILKYMISTONE_JTAGSERIAL_PID	0x0713
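The two ftdi_sio hunks work as a pair: the header contributes the VID/PID, and the table entry in ftdi_sio.c attaches ftdi_jtag_quirk through driver_info so the board's JTAG interface is not exposed as a serial port. A hedged condensation of how such a quirk is consumed (simplified stand-in types, not the driver source):

struct id_sketch {			/* stand-in for struct usb_device_id */
	unsigned long driver_info;	/* points at a quirk when set */
};

struct quirk_sketch {			/* stand-in for struct ftdi_sio_quirk */
	int (*probe)(int ifnum);
};

/* Interface 0 of the Milkymist One is JTAG; vetoing it here leaves
 * only the serial interface to be registered as a ttyUSB device. */
static int probe_sketch(const struct id_sketch *id, int ifnum)
{
	const struct quirk_sketch *q = (const void *)id->driver_info;

	return (q && q->probe) ? q->probe(ifnum) : 0;	/* e.g. -ENODEV for JTAG */
}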
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1..ef2977d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
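The option.c change narrows the E14AC match on purpose: these modems typically expose a mass-storage interface carrying the Windows driver CD, and a bare USB_DEVICE() entry would claim that interface too. Illustrative contrast (vid/pid are placeholders):

	{ USB_DEVICE(vid, pid) },		/* claims every interface */
	{ USB_DEVICE_AND_INTERFACE_INFO(vid, pid,
		0xff, 0xff, 0xff) },		/* vendor-specific interfaces only */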
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7..e45e673 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@
 	int bit_index;
 
 	ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
-	
+	if (!ai)
+		return UWB_RSV_ALLOC_NOT_FOUND;
 	ai->min_mas = rsv->min_mas;
 	ai->max_mas = rsv->max_mas;
 	ai->max_interval = rsv->max_interval;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f5..321a0c8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
 	}
 }
 
@@ -1327,10 +1324,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
-
 	}
 }
 
@@ -1390,6 +1383,7 @@
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
+	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1408,6 +1402,23 @@
 		restore_cpu_virqs(cpu);
 		restore_cpu_ipis(cpu);
 	}
+
+	/*
+	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
+	 * are not handled by the IRQ core.
+	 */
+	for_each_irq_desc(irq, desc) {
+		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
+			continue;
+		if (desc->status & IRQ_DISABLED)
+			continue;
+
+		evtchn = evtchn_from_irq(irq);
+		if (evtchn == -1)
+			continue;
+
+		unmask_evtchn(evtchn);
+	}
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
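The new loop in xen_irq_resume() exists because the IRQ core deliberately skips IRQF_NO_SUSPEND actions during suspend/resume, so their event channels come back masked. Illustrative registration of such an IRQ (handler and device names are examples, not from this patch):

	/* An IRQ that must keep firing across suspend; the IRQ core will
	 * not re-enable it on resume, hence the manual unmask above. */
	err = request_irq(irq, my_handler, IRQF_NO_SUSPEND, "my-device", dev);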
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a75474..c7ea9bc 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
 #define N_V253		19	/* Codec control over voice modem */
 #define N_CAIF		20      /* CAIF protocol for talking to modems */
 #define N_GSM0710	21	/* GSM 0710 Mux */
-#define N_TI_WL	22	/* for TI's WL BT, FM, GPS combo chips */
+#define N_TI_WL		22	/* for TI's WL BT, FM, GPS combo chips */
 
 /*
  * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab..24300d8 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@
  * @disconnect: Called when the interface is no longer accessible, usually
  *	because its device has been (or is being) disconnected or the
  *	driver module is being unloaded.
- * @ioctl: Used for drivers that want to talk to userspace through
+ * @unlocked_ioctl: Used for drivers that want to talk to userspace through
  *	the "usbfs" filesystem.  This lets devices provide ways to
  *	expose information to user space regardless of where they
  *	do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d..2387f9f 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@
 	/* A GPIO controlling VRSEL in Blackfin */
 	unsigned int	gpio_vrsel;
 	unsigned int	gpio_vrsel_active;
+	/* musb CLKIN frequency on Blackfin, in MHz */
+	unsigned char   clkin;
 #endif
 
 };
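The new clkin field is board-supplied platform data. A hypothetical Blackfin board file might fill it in as below; the board and rate are made up, while musb_hdrc_platform_data and MUSB_OTG are the real names:

static struct musb_hdrc_platform_data sketch_musb_data = {
	.mode	= MUSB_OTG,
	.clkin	= 24,	/* 24 MHz oscillator on this made-up board */
};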