Merge master.kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6: (33 commits)
  amd74xx: remove /proc/ide/amd74xx
  amd74xx/via82cxxx: don't initialize drive->dn
  sis5513: remove /proc/ide/sis
  ide: remove CONFIG_IDEDMA_ONLYDISK
  ide: add "hdx=nodma" kernel parameter
  ide: remove hwif->autodma and drive->autodma
  ide: remove "idex=dma" kernel parameter
  ide: remove CONFIG_BLK_DEV_IDEDMA_FORCED
  ide: use PCI_VDEVICE() macro
  sis5513: clear prefetch and postwrite for ATAPI devices
  it8213/piix/slc90e66: "de-couple" PIO and UDMA modes
  ide: unexport noautodma
  ide: unexport ide_tune_dma
  ide: remove ->ide_dma_check (take 2)
  ide-pmac: add PIO autotune fallback to ->ide_dma_check
  ide-cris: add PIO autotune fallback to ->ide_dma_check
  sl82c105: add PIO autotune fallback to ->ide_dma_check
  cs5530/sc1200: add PIO autotune fallback to ->ide_dma_check
  ide: remove ide_use_fast_pio()
  ide: remove drive->init_speed zeroing
  ...
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 777c8d8..c5cfcfa 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -869,6 +869,7 @@
 	void *cpu_data;
 
 	cpu_data = per_cpu_init();
+#ifdef CONFIG_SMP
 	/*
 	 * insert boot cpu into sibling and core mapes
 	 * (must be done after per_cpu area is setup)
@@ -877,6 +878,7 @@
 		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, cpu_core_map[0]);
 	}
+#endif
 
 	/*
 	 * We set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 29ed495..702d884 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -243,7 +243,8 @@
 					"physical %p.\n", start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size,
-					__pa(p), mode_rw, mmu_linear_psize);
+					__pa(p), mode_rw, mmu_linear_psize,
+					mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index d6ed8e5..e8756e5 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -64,10 +64,10 @@
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 
 $(obj)/zImage:  IMAGE_OFFSET := 0x1000
-$(obj)/zImage:  EXTRA_AFLAGS := $(SVGA_MODE) $(RAMDISK)
+$(obj)/zImage:  asflags-y := $(SVGA_MODE) $(RAMDISK)
 $(obj)/bzImage: IMAGE_OFFSET := 0x100000
-$(obj)/bzImage: EXTRA_CFLAGS := -D__BIG_KERNEL__
-$(obj)/bzImage: EXTRA_AFLAGS := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
+$(obj)/bzImage: ccflags-y := -D__BIG_KERNEL__
+$(obj)/bzImage: asflags-y := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
 $(obj)/bzImage: BUILDFLAGS   := -b
 
 quiet_cmd_image = BUILD   $@
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 0647130..60f1a89 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -25,11 +25,14 @@
 #include <linux/device.h>
 #include <linux/vmalloc.h>
 #include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/time.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/idr.h>
 #include <linux/compat.h>
 #include <linux/firewire-cdev.h>
+#include <asm/system.h>
 #include <asm/uaccess.h>
 #include "fw-transaction.h"
 #include "fw-topology.h"
@@ -140,11 +143,10 @@
 	event->v[1].size = size1;
 
 	spin_lock_irqsave(&client->lock, flags);
-
 	list_add_tail(&event->link, &client->event_list);
-	wake_up_interruptible(&client->wait);
-
 	spin_unlock_irqrestore(&client->lock, flags);
+
+	wake_up_interruptible(&client->wait);
 }
 
 static int
@@ -621,20 +623,19 @@
 	     size_t header_length, void *header, void *data)
 {
 	struct client *client = data;
-	struct iso_interrupt *interrupt;
+	struct iso_interrupt *irq;
 
-	interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
-	if (interrupt == NULL)
+	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
+	if (irq == NULL)
 		return;
 
-	interrupt->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
-	interrupt->interrupt.closure   = client->iso_closure;
-	interrupt->interrupt.cycle     = cycle;
-	interrupt->interrupt.header_length = header_length;
-	memcpy(interrupt->interrupt.header, header, header_length);
-	queue_event(client, &interrupt->event,
-		    &interrupt->interrupt,
-		    sizeof(interrupt->interrupt) + header_length, NULL, 0);
+	irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
+	irq->interrupt.closure   = client->iso_closure;
+	irq->interrupt.cycle     = cycle;
+	irq->interrupt.header_length = header_length;
+	memcpy(irq->interrupt.header, header, header_length);
+	queue_event(client, &irq->event, &irq->interrupt,
+		    sizeof(irq->interrupt) + header_length, NULL, 0);
 }
 
 static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -812,6 +813,28 @@
 	return fw_iso_context_stop(client->iso_context);
 }
 
+static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+{
+	struct fw_cdev_get_cycle_timer *request = buffer;
+	struct fw_card *card = client->device->card;
+	unsigned long long bus_time;
+	struct timeval tv;
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+
+	bus_time = card->driver->get_bus_time(card);
+	do_gettimeofday(&tv);
+
+	local_irq_restore(flags);
+	preempt_enable();
+
+	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
+	request->cycle_timer = bus_time & 0xffffffff;
+	return 0;
+}
+
 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
 	ioctl_get_info,
 	ioctl_send_request,
@@ -825,6 +848,7 @@
 	ioctl_queue_iso,
 	ioctl_start_iso,
 	ioctl_stop_iso,
+	ioctl_get_cycle_timer,
 };
 
 static int
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index d13e6a6..894d4a9 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -102,11 +102,6 @@
 #define CSR_INSTANCE		0x18
 #define CSR_DIRECTORY_ID	0x20
 
-#define SBP2_COMMAND_SET_SPECIFIER	0x38
-#define SBP2_COMMAND_SET		0x39
-#define SBP2_COMMAND_SET_REVISION	0x3b
-#define SBP2_FIRMWARE_REVISION		0x3c
-
 struct fw_csr_iterator {
 	u32 *p;
 	u32 *end;
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index e14c1ca7..2f307c4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -18,21 +18,23 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
 
-#include <asm/uaccess.h>
-#include <asm/semaphore.h>
+#include <asm/page.h>
+#include <asm/system.h>
 
-#include "fw-transaction.h"
 #include "fw-ohci.h"
+#include "fw-transaction.h"
 
 #define DESCRIPTOR_OUTPUT_MORE		0
 #define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
@@ -678,6 +680,9 @@
 
 	/* FIXME: Document how the locking works. */
 	if (ohci->generation != packet->generation) {
+		if (packet->payload_length > 0)
+			dma_unmap_single(ohci->card.device, payload_bus,
+					 packet->payload_length, DMA_TO_DEVICE);
 		packet->ack = RCODE_GENERATION;
 		return -1;
 	}
@@ -912,10 +917,15 @@
 
 	reg = reg_read(ohci, OHCI1394_NodeID);
 	if (!(reg & OHCI1394_NodeID_idValid)) {
-		fw_error("node ID not valid, new bus reset in progress\n");
+		fw_notify("node ID not valid, new bus reset in progress\n");
 		return;
 	}
-	ohci->node_id = reg & 0xffff;
+	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
+		fw_notify("misconfigured bus\n");
+		return;
+	}
+	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
+			       OHCI1394_NodeID_nodeNumber);
 
 	/*
 	 * The count in the SelfIDCount register is the number of
@@ -926,12 +936,14 @@
 
 	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
 	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+	rmb();
 
 	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
 		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
 			fw_error("inconsistent self IDs\n");
 		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
 	}
+	rmb();
 
 	/*
 	 * Check the consistency of the self IDs we just read.  The
@@ -1046,6 +1058,9 @@
 		iso_event &= ~(1 << i);
 	}
 
+	if (unlikely(event & OHCI1394_postedWriteErr))
+		fw_error("PCI posted write error\n");
+
 	if (event & OHCI1394_cycle64Seconds) {
 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 		if ((cycle_time & 0x80000000) == 0)
@@ -1119,8 +1134,8 @@
 		  OHCI1394_RQPkt | OHCI1394_RSPkt |
 		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		  OHCI1394_isochRx | OHCI1394_isochTx |
-		  OHCI1394_masterIntEnable |
-		  OHCI1394_cycle64Seconds);
+		  OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
+		  OHCI1394_masterIntEnable);
 
 	/* Activate link_on bit and contender bit in our self ID packets.*/
 	if (ohci_update_phy_reg(card, 4, 0,
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
index fa15706..dec4f04 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/fw-ohci.h
@@ -59,6 +59,8 @@
 #define   OHCI1394_LinkControl_cycleSource	(1 << 22)
 #define OHCI1394_NodeID                       0x0E8
 #define   OHCI1394_NodeID_idValid             0x80000000
+#define   OHCI1394_NodeID_nodeNumber          0x0000003f
+#define   OHCI1394_NodeID_busNumber           0x0000ffc0
 #define OHCI1394_PhyControl                   0x0EC
 #define   OHCI1394_PhyControl_Read(addr)	(((addr) << 8) | 0x00008000)
 #define   OHCI1394_PhyControl_ReadDone		0x80000000
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 238730f..5596df6 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -37,11 +37,12 @@
 #include <linux/dma-mapping.h>
 #include <linux/blkdev.h>
 #include <linux/string.h>
+#include <linux/stringify.h>
 #include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 
@@ -61,36 +62,94 @@
 MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
 		 "(default = Y, use N for concurrent initiators)");
 
+/*
+ * Flags for firmware oddities
+ *
+ * - 128kB max transfer
+ *   Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ *   When scsi_mod probes the device, let the inquiry command look like that
+ *   from MS Windows.
+ *
+ * - skip mode page 8
+ *   Suppress sending of mode_sense for mode page 8 if the device pretends to
+ *   support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ *   Tell sd_mod to correct the last sector number reported by read_capacity.
+ *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ *   Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ *   Instead of adding to the built-in blacklist, use only the workarounds
+ *   specified in the module load parameter.
+ *   Useful if a blacklist entry interfered with a non-broken device.
+ */
+#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
+#define SBP2_WORKAROUND_INQUIRY_36	0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
+#define SBP2_WORKAROUND_OVERRIDE	0x100
+
+static int sbp2_param_workarounds;
+module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+	", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+	", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
+	", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+	", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+	", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+	", or a combination)");
+
 /* I don't know why the SCSI stack doesn't define something like this... */
 typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
 
 static const char sbp2_driver_name[] = "sbp2";
 
-struct sbp2_device {
-	struct kref kref;
-	struct fw_unit *unit;
+/*
+ * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
+ * and one struct scsi_device per sbp2_logical_unit.
+ */
+struct sbp2_logical_unit {
+	struct sbp2_target *tgt;
+	struct list_head link;
+	struct scsi_device *sdev;
 	struct fw_address_handler address_handler;
 	struct list_head orb_list;
-	u64 management_agent_address;
+
 	u64 command_block_agent_address;
-	u32 workarounds;
+	u16 lun;
 	int login_id;
 
 	/*
-	 * We cache these addresses and only update them once we've
-	 * logged in or reconnected to the sbp2 device.  That way, any
-	 * IO to the device will automatically fail and get retried if
-	 * it happens in a window where the device is not ready to
-	 * handle it (e.g. after a bus reset but before we reconnect).
+	 * The generation is updated once we've logged in or reconnected
+	 * to the logical unit.  Thus, I/O to the device will automatically
+	 * fail and get retried if it happens in a window where the device
+	 * is not ready, e.g. after a bus reset but before we reconnect.
 	 */
-	int node_id;
-	int address_high;
 	int generation;
-
 	int retries;
 	struct delayed_work work;
 };
 
+/*
+ * We create one struct sbp2_target per IEEE 1212 Unit Directory
+ * and one struct Scsi_Host per sbp2_target.
+ */
+struct sbp2_target {
+	struct kref kref;
+	struct fw_unit *unit;
+
+	u64 management_agent_address;
+	int directory_id;
+	int node_id;
+	int address_high;
+
+	unsigned workarounds;
+	struct list_head lu_list;
+};
+
 #define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
 #define SBP2_MAX_SECTORS		255	/* Max sectors supported */
 #define SBP2_ORB_TIMEOUT		2000	/* Timeout in ms */
@@ -101,17 +160,9 @@
 #define SBP2_DIRECTION_FROM_MEDIA	0x1
 
 /* Unit directory keys */
-#define SBP2_COMMAND_SET_SPECIFIER	0x38
-#define SBP2_COMMAND_SET		0x39
-#define SBP2_COMMAND_SET_REVISION	0x3b
-#define SBP2_FIRMWARE_REVISION		0x3c
-
-/* Flags for detected oddities and brokeness */
-#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
-#define SBP2_WORKAROUND_INQUIRY_36	0x2
-#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
-#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
-#define SBP2_WORKAROUND_OVERRIDE	0x100
+#define SBP2_CSR_FIRMWARE_REVISION	0x3c
+#define SBP2_CSR_LOGICAL_UNIT_NUMBER	0x14
+#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY	0xd4
 
 /* Management orb opcodes */
 #define SBP2_LOGIN_REQUEST		0x0
@@ -219,7 +270,7 @@
 	} request;
 	struct scsi_cmnd *cmd;
 	scsi_done_fn_t done;
-	struct fw_unit *unit;
+	struct sbp2_logical_unit *lu;
 
 	struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
 	dma_addr_t page_table_bus;
@@ -295,7 +346,7 @@
 		  unsigned long long offset,
 		  void *payload, size_t length, void *callback_data)
 {
-	struct sbp2_device *sd = callback_data;
+	struct sbp2_logical_unit *lu = callback_data;
 	struct sbp2_orb *orb;
 	struct sbp2_status status;
 	size_t header_size;
@@ -319,7 +370,7 @@
 
 	/* Lookup the orb corresponding to this status write. */
 	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(orb, &sd->orb_list, link) {
+	list_for_each_entry(orb, &lu->orb_list, link) {
 		if (STATUS_GET_ORB_HIGH(status) == 0 &&
 		    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
 			orb->rcode = RCODE_COMPLETE;
@@ -329,7 +380,7 @@
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (&orb->link != &sd->orb_list)
+	if (&orb->link != &lu->orb_list)
 		orb->callback(orb, &status);
 	else
 		fw_error("status write for unknown orb\n");
@@ -361,20 +412,20 @@
 		orb->rcode = rcode;
 	if (orb->rcode != RCODE_COMPLETE) {
 		list_del(&orb->link);
+		spin_unlock_irqrestore(&card->lock, flags);
 		orb->callback(orb, NULL);
+	} else {
+		spin_unlock_irqrestore(&card->lock, flags);
 	}
 
-	spin_unlock_irqrestore(&card->lock, flags);
-
 	kref_put(&orb->kref, free_orb);
 }
 
 static void
-sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
+sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 	      int node_id, int generation, u64 offset)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	unsigned long flags;
 
 	orb->pointer.high = 0;
@@ -382,7 +433,7 @@
 	fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
 
 	spin_lock_irqsave(&device->card->lock, flags);
-	list_add_tail(&orb->link, &sd->orb_list);
+	list_add_tail(&orb->link, &lu->orb_list);
 	spin_unlock_irqrestore(&device->card->lock, flags);
 
 	/* Take a ref for the orb list and for the transaction callback. */
@@ -395,10 +446,9 @@
 			complete_transaction, orb);
 }
 
-static int sbp2_cancel_orbs(struct fw_unit *unit)
+static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct sbp2_orb *orb, *next;
 	struct list_head list;
 	unsigned long flags;
@@ -406,7 +456,7 @@
 
 	INIT_LIST_HEAD(&list);
 	spin_lock_irqsave(&device->card->lock, flags);
-	list_splice_init(&sd->orb_list, &list);
+	list_splice_init(&lu->orb_list, &list);
 	spin_unlock_irqrestore(&device->card->lock, flags);
 
 	list_for_each_entry_safe(orb, next, &list, link) {
@@ -433,11 +483,11 @@
 }
 
 static int
-sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
-			 int function, int lun, void *response)
+sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+			 int generation, int function, int lun_or_login_id,
+			 void *response)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct sbp2_management_orb *orb;
 	int retval = -ENOMEM;
 
@@ -458,12 +508,12 @@
 	orb->request.misc =
 		MANAGEMENT_ORB_NOTIFY |
 		MANAGEMENT_ORB_FUNCTION(function) |
-		MANAGEMENT_ORB_LUN(lun);
+		MANAGEMENT_ORB_LUN(lun_or_login_id);
 	orb->request.length =
 		MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
 
-	orb->request.status_fifo.high = sd->address_handler.offset >> 32;
-	orb->request.status_fifo.low  = sd->address_handler.offset;
+	orb->request.status_fifo.high = lu->address_handler.offset >> 32;
+	orb->request.status_fifo.low  = lu->address_handler.offset;
 
 	if (function == SBP2_LOGIN_REQUEST) {
 		orb->request.misc |=
@@ -482,14 +532,14 @@
 	if (dma_mapping_error(orb->base.request_bus))
 		goto fail_mapping_request;
 
-	sbp2_send_orb(&orb->base, unit,
-		      node_id, generation, sd->management_agent_address);
+	sbp2_send_orb(&orb->base, lu, node_id, generation,
+		      lu->tgt->management_agent_address);
 
 	wait_for_completion_timeout(&orb->done,
 				    msecs_to_jiffies(SBP2_ORB_TIMEOUT));
 
 	retval = -EIO;
-	if (sbp2_cancel_orbs(unit) == 0) {
+	if (sbp2_cancel_orbs(lu) == 0) {
 		fw_error("orb reply timed out, rcode=0x%02x\n",
 			 orb->base.rcode);
 		goto out;
@@ -534,10 +584,9 @@
 	kfree(t);
 }
 
-static int sbp2_agent_reset(struct fw_unit *unit)
+static int sbp2_agent_reset(struct sbp2_logical_unit *lu)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct fw_transaction *t;
 	static u32 zero;
 
@@ -546,181 +595,272 @@
 		return -ENOMEM;
 
 	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
-			sd->node_id, sd->generation, device->max_speed,
-			sd->command_block_agent_address + SBP2_AGENT_RESET,
+			lu->tgt->node_id, lu->generation, device->max_speed,
+			lu->command_block_agent_address + SBP2_AGENT_RESET,
 			&zero, sizeof(zero), complete_agent_reset_write, t);
 
 	return 0;
 }
 
-static void sbp2_reconnect(struct work_struct *work);
-static struct scsi_host_template scsi_driver_template;
-
-static void release_sbp2_device(struct kref *kref)
+static void sbp2_release_target(struct kref *kref)
 {
-	struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
-	struct Scsi_Host *host =
-		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
+	struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
+	struct sbp2_logical_unit *lu, *next;
+	struct Scsi_Host *shost =
+		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
 
-	scsi_remove_host(host);
-	sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
-				 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
-	fw_core_remove_address_handler(&sd->address_handler);
-	fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
-	put_device(&sd->unit->device);
-	scsi_host_put(host);
+	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+		if (lu->sdev)
+			scsi_remove_device(lu->sdev);
+
+		sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
+				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+		fw_core_remove_address_handler(&lu->address_handler);
+		list_del(&lu->link);
+		kfree(lu);
+	}
+	scsi_remove_host(shost);
+	fw_notify("released %s\n", tgt->unit->device.bus_id);
+
+	put_device(&tgt->unit->device);
+	scsi_host_put(shost);
 }
 
+static struct workqueue_struct *sbp2_wq;
+
+static void sbp2_reconnect(struct work_struct *work);
+
 static void sbp2_login(struct work_struct *work)
 {
-	struct sbp2_device *sd =
-		container_of(work, struct sbp2_device, work.work);
-	struct Scsi_Host *host =
-		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
-	struct fw_unit *unit = sd->unit;
+	struct sbp2_logical_unit *lu =
+		container_of(work, struct sbp2_logical_unit, work.work);
+	struct Scsi_Host *shost =
+		container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]);
+	struct scsi_device *sdev;
+	struct scsi_lun eight_bytes_lun;
+	struct fw_unit *unit = lu->tgt->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
 	struct sbp2_login_response response;
-	int generation, node_id, local_node_id, lun, retval;
-
-	/* FIXME: Make this work for multi-lun devices. */
-	lun = 0;
+	int generation, node_id, local_node_id;
 
 	generation    = device->card->generation;
 	node_id       = device->node->node_id;
 	local_node_id = device->card->local_node->node_id;
 
-	if (sbp2_send_management_orb(unit, node_id, generation,
-				     SBP2_LOGIN_REQUEST, lun, &response) < 0) {
-		if (sd->retries++ < 5) {
-			schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+	if (sbp2_send_management_orb(lu, node_id, generation,
+				SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
+		if (lu->retries++ < 5) {
+			queue_delayed_work(sbp2_wq, &lu->work,
+					   DIV_ROUND_UP(HZ, 5));
 		} else {
-			fw_error("failed to login to %s\n",
-				 unit->device.bus_id);
-			kref_put(&sd->kref, release_sbp2_device);
+			fw_error("failed to login to %s LUN %04x\n",
+				 unit->device.bus_id, lu->lun);
+			kref_put(&lu->tgt->kref, sbp2_release_target);
 		}
 		return;
 	}
 
-	sd->generation   = generation;
-	sd->node_id      = node_id;
-	sd->address_high = local_node_id << 16;
+	lu->generation        = generation;
+	lu->tgt->node_id      = node_id;
+	lu->tgt->address_high = local_node_id << 16;
 
 	/* Get command block agent offset and login id. */
-	sd->command_block_agent_address =
+	lu->command_block_agent_address =
 		((u64) (response.command_block_agent.high & 0xffff) << 32) |
 		response.command_block_agent.low;
-	sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
+	lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
 
-	fw_notify("logged in to sbp2 unit %s (%d retries)\n",
-		  unit->device.bus_id, sd->retries);
-	fw_notify(" - management_agent_address:    0x%012llx\n",
-		  (unsigned long long) sd->management_agent_address);
-	fw_notify(" - command_block_agent_address: 0x%012llx\n",
-		  (unsigned long long) sd->command_block_agent_address);
-	fw_notify(" - status write address:        0x%012llx\n",
-		  (unsigned long long) sd->address_handler.offset);
+	fw_notify("logged in to %s LUN %04x (%d retries)\n",
+		  unit->device.bus_id, lu->lun, lu->retries);
 
 #if 0
 	/* FIXME: The linux1394 sbp2 does this last step. */
 	sbp2_set_busy_timeout(scsi_id);
 #endif
 
-	PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
-	sbp2_agent_reset(unit);
+	PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+	sbp2_agent_reset(lu);
 
-	/* FIXME: Loop over luns here. */
-	lun = 0;
-	retval = scsi_add_device(host, 0, 0, lun);
-	if (retval < 0) {
-		sbp2_send_management_orb(unit, sd->node_id, sd->generation,
-					 SBP2_LOGOUT_REQUEST, sd->login_id,
-					 NULL);
+	memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
+	eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
+	eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
+
+	sdev = __scsi_add_device(shost, 0, 0,
+				 scsilun_to_int(&eight_bytes_lun), lu);
+	if (IS_ERR(sdev)) {
+		sbp2_send_management_orb(lu, node_id, generation,
+				SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
 		/*
 		 * Set this back to sbp2_login so we fall back and
 		 * retry login on bus reset.
 		 */
-		PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+		PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+	} else {
+		lu->sdev = sdev;
+		scsi_device_put(sdev);
 	}
-	kref_put(&sd->kref, release_sbp2_device);
+	kref_put(&lu->tgt->kref, sbp2_release_target);
 }
 
+static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+{
+	struct sbp2_logical_unit *lu;
+
+	lu = kmalloc(sizeof(*lu), GFP_KERNEL);
+	if (!lu)
+		return -ENOMEM;
+
+	lu->address_handler.length           = 0x100;
+	lu->address_handler.address_callback = sbp2_status_write;
+	lu->address_handler.callback_data    = lu;
+
+	if (fw_core_add_address_handler(&lu->address_handler,
+					&fw_high_memory_region) < 0) {
+		kfree(lu);
+		return -ENOMEM;
+	}
+
+	lu->tgt  = tgt;
+	lu->sdev = NULL;
+	lu->lun  = lun_entry & 0xffff;
+	lu->retries = 0;
+	INIT_LIST_HEAD(&lu->orb_list);
+	INIT_DELAYED_WORK(&lu->work, sbp2_login);
+
+	list_add_tail(&lu->link, &tgt->lu_list);
+	return 0;
+}
+
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+{
+	struct fw_csr_iterator ci;
+	int key, value;
+
+	fw_csr_iterator_init(&ci, directory);
+	while (fw_csr_iterator_next(&ci, &key, &value))
+		if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
+		    sbp2_add_logical_unit(tgt, value) < 0)
+			return -ENOMEM;
+	return 0;
+}
+
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+			      u32 *model, u32 *firmware_revision)
+{
+	struct fw_csr_iterator ci;
+	int key, value;
+
+	fw_csr_iterator_init(&ci, directory);
+	while (fw_csr_iterator_next(&ci, &key, &value)) {
+		switch (key) {
+
+		case CSR_DEPENDENT_INFO | CSR_OFFSET:
+			tgt->management_agent_address =
+					CSR_REGISTER_BASE + 4 * value;
+			break;
+
+		case CSR_DIRECTORY_ID:
+			tgt->directory_id = value;
+			break;
+
+		case CSR_MODEL:
+			*model = value;
+			break;
+
+		case SBP2_CSR_FIRMWARE_REVISION:
+			*firmware_revision = value;
+			break;
+
+		case SBP2_CSR_LOGICAL_UNIT_NUMBER:
+			if (sbp2_add_logical_unit(tgt, value) < 0)
+				return -ENOMEM;
+			break;
+
+		case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
+			if (sbp2_scan_logical_unit_dir(tgt, ci.p + value) < 0)
+				return -ENOMEM;
+			break;
+		}
+	}
+	return 0;
+}
+
+static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
+				  u32 firmware_revision)
+{
+	int i;
+	unsigned w = sbp2_param_workarounds;
+
+	if (w)
+		fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
+			  "if you need the workarounds parameter for %s\n",
+			  tgt->unit->device.bus_id);
+
+	if (w & SBP2_WORKAROUND_OVERRIDE)
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+
+		if (sbp2_workarounds_table[i].firmware_revision !=
+		    (firmware_revision & 0xffffff00))
+			continue;
+
+		if (sbp2_workarounds_table[i].model != model &&
+		    sbp2_workarounds_table[i].model != ~0)
+			continue;
+
+		w |= sbp2_workarounds_table[i].workarounds;
+		break;
+	}
+ out:
+	if (w)
+		fw_notify("Workarounds for %s: 0x%x "
+			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
+			  tgt->unit->device.bus_id,
+			  w, firmware_revision, model);
+	tgt->workarounds = w;
+}
+
+static struct scsi_host_template scsi_driver_template;
+
 static int sbp2_probe(struct device *dev)
 {
 	struct fw_unit *unit = fw_unit(dev);
 	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd;
-	struct fw_csr_iterator ci;
-	struct Scsi_Host *host;
-	int i, key, value, err;
+	struct sbp2_target *tgt;
+	struct sbp2_logical_unit *lu;
+	struct Scsi_Host *shost;
 	u32 model, firmware_revision;
 
-	err = -ENOMEM;
-	host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
-	if (host == NULL)
-		goto fail;
+	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
+	if (shost == NULL)
+		return -ENOMEM;
 
-	sd = (struct sbp2_device *) host->hostdata;
-	unit->device.driver_data = sd;
-	sd->unit = unit;
-	INIT_LIST_HEAD(&sd->orb_list);
-	kref_init(&sd->kref);
+	tgt = (struct sbp2_target *)shost->hostdata;
+	unit->device.driver_data = tgt;
+	tgt->unit = unit;
+	kref_init(&tgt->kref);
+	INIT_LIST_HEAD(&tgt->lu_list);
 
-	sd->address_handler.length = 0x100;
-	sd->address_handler.address_callback = sbp2_status_write;
-	sd->address_handler.callback_data = sd;
+	if (fw_device_enable_phys_dma(device) < 0)
+		goto fail_shost_put;
 
-	err = fw_core_add_address_handler(&sd->address_handler,
-					  &fw_high_memory_region);
-	if (err < 0)
-		goto fail_host;
+	if (scsi_add_host(shost, &unit->device) < 0)
+		goto fail_shost_put;
 
-	err = fw_device_enable_phys_dma(device);
-	if (err < 0)
-		goto fail_address_handler;
-
-	err = scsi_add_host(host, &unit->device);
-	if (err < 0)
-		goto fail_address_handler;
-
-	/*
-	 * Scan unit directory to get management agent address,
-	 * firmware revison and model.  Initialize firmware_revision
-	 * and model to values that wont match anything in our table.
-	 */
+	/* Initialize to values that won't match anything in our table. */
 	firmware_revision = 0xff000000;
 	model = 0xff000000;
-	fw_csr_iterator_init(&ci, unit->directory);
-	while (fw_csr_iterator_next(&ci, &key, &value)) {
-		switch (key) {
-		case CSR_DEPENDENT_INFO | CSR_OFFSET:
-			sd->management_agent_address =
-				0xfffff0000000ULL + 4 * value;
-			break;
-		case SBP2_FIRMWARE_REVISION:
-			firmware_revision = value;
-			break;
-		case CSR_MODEL:
-			model = value;
-			break;
-		}
-	}
 
-	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
-		if (sbp2_workarounds_table[i].firmware_revision !=
-		    (firmware_revision & 0xffffff00))
-			continue;
-		if (sbp2_workarounds_table[i].model != model &&
-		    sbp2_workarounds_table[i].model != ~0)
-			continue;
-		sd->workarounds |= sbp2_workarounds_table[i].workarounds;
-		break;
-	}
+	/* implicit directory ID */
+	tgt->directory_id = ((unit->directory - device->config_rom) * 4
+			     + CSR_CONFIG_ROM) & 0xffffff;
 
-	if (sd->workarounds)
-		fw_notify("Workarounds for node %s: 0x%x "
-			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
-			  unit->device.bus_id,
-			  sd->workarounds, firmware_revision, model);
+	if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
+			       &firmware_revision) < 0)
+		goto fail_tgt_put;
+
+	sbp2_init_workarounds(tgt, model, firmware_revision);
 
 	get_device(&unit->device);
 
@@ -729,35 +869,34 @@
 	 * reschedule retries. Always get the ref before scheduling
 	 * work.
 	 */
-	INIT_DELAYED_WORK(&sd->work, sbp2_login);
-	if (schedule_delayed_work(&sd->work, 0))
-		kref_get(&sd->kref);
-
+	list_for_each_entry(lu, &tgt->lu_list, link)
+		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+			kref_get(&tgt->kref);
 	return 0;
 
- fail_address_handler:
-	fw_core_remove_address_handler(&sd->address_handler);
- fail_host:
-	scsi_host_put(host);
- fail:
-	return err;
+ fail_tgt_put:
+	kref_put(&tgt->kref, sbp2_release_target);
+	return -ENOMEM;
+
+ fail_shost_put:
+	scsi_host_put(shost);
+	return -ENOMEM;
 }
 
 static int sbp2_remove(struct device *dev)
 {
 	struct fw_unit *unit = fw_unit(dev);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct sbp2_target *tgt = unit->device.driver_data;
 
-	kref_put(&sd->kref, release_sbp2_device);
-
+	kref_put(&tgt->kref, sbp2_release_target);
 	return 0;
 }
 
 static void sbp2_reconnect(struct work_struct *work)
 {
-	struct sbp2_device *sd =
-		container_of(work, struct sbp2_device, work.work);
-	struct fw_unit *unit = sd->unit;
+	struct sbp2_logical_unit *lu =
+		container_of(work, struct sbp2_logical_unit, work.work);
+	struct fw_unit *unit = lu->tgt->unit;
 	struct fw_device *device = fw_device(unit->device.parent);
 	int generation, node_id, local_node_id;
 
@@ -765,40 +904,49 @@
 	node_id       = device->node->node_id;
 	local_node_id = device->card->local_node->node_id;
 
-	if (sbp2_send_management_orb(unit, node_id, generation,
+	if (sbp2_send_management_orb(lu, node_id, generation,
 				     SBP2_RECONNECT_REQUEST,
-				     sd->login_id, NULL) < 0) {
-		if (sd->retries++ >= 5) {
+				     lu->login_id, NULL) < 0) {
+		if (lu->retries++ >= 5) {
 			fw_error("failed to reconnect to %s\n",
 				 unit->device.bus_id);
 			/* Fall back and try to log in again. */
-			sd->retries = 0;
-			PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
+			lu->retries = 0;
+			PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
 		}
-		schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
+		queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5));
 		return;
 	}
 
-	sd->generation   = generation;
-	sd->node_id      = node_id;
-	sd->address_high = local_node_id << 16;
+	lu->generation        = generation;
+	lu->tgt->node_id      = node_id;
+	lu->tgt->address_high = local_node_id << 16;
 
-	fw_notify("reconnected to unit %s (%d retries)\n",
-		  unit->device.bus_id, sd->retries);
-	sbp2_agent_reset(unit);
-	sbp2_cancel_orbs(unit);
-	kref_put(&sd->kref, release_sbp2_device);
+	fw_notify("reconnected to %s LUN %04x (%d retries)\n",
+		  unit->device.bus_id, lu->lun, lu->retries);
+
+	sbp2_agent_reset(lu);
+	sbp2_cancel_orbs(lu);
+
+	kref_put(&lu->tgt->kref, sbp2_release_target);
 }
 
 static void sbp2_update(struct fw_unit *unit)
 {
-	struct fw_device *device = fw_device(unit->device.parent);
-	struct sbp2_device *sd = unit->device.driver_data;
+	struct sbp2_target *tgt = unit->device.driver_data;
+	struct sbp2_logical_unit *lu;
 
-	sd->retries = 0;
-	fw_device_enable_phys_dma(device);
-	if (schedule_delayed_work(&sd->work, 0))
-		kref_get(&sd->kref);
+	fw_device_enable_phys_dma(fw_device(unit->device.parent));
+
+	/*
+	 * Fw-core serializes sbp2_update() against sbp2_remove().
+	 * Iteration over tgt->lu_list is therefore safe here.
+	 */
+	list_for_each_entry(lu, &tgt->lu_list, link) {
+		lu->retries = 0;
+		if (queue_delayed_work(sbp2_wq, &lu->work, 0))
+			kref_get(&tgt->kref);
+	}
 }
 
 #define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
@@ -868,13 +1016,12 @@
 {
 	struct sbp2_command_orb *orb =
 		container_of(base_orb, struct sbp2_command_orb, base);
-	struct fw_unit *unit = orb->unit;
-	struct fw_device *device = fw_device(unit->device.parent);
+	struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
 	int result;
 
 	if (status != NULL) {
 		if (STATUS_GET_DEAD(*status))
-			sbp2_agent_reset(unit);
+			sbp2_agent_reset(orb->lu);
 
 		switch (STATUS_GET_RESPONSE(*status)) {
 		case SBP2_STATUS_REQUEST_COMPLETE:
@@ -918,12 +1065,10 @@
 	orb->done(orb->cmd);
 }
 
-static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
+static int
+sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
+		     struct sbp2_logical_unit *lu)
 {
-	struct sbp2_device *sd =
-		(struct sbp2_device *)orb->cmd->device->host->hostdata;
-	struct fw_unit *unit = sd->unit;
-	struct fw_device *device = fw_device(unit->device.parent);
 	struct scatterlist *sg;
 	int sg_len, l, i, j, count;
 	dma_addr_t sg_addr;
@@ -942,10 +1087,9 @@
 	 * tables.
 	 */
 	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
-		orb->request.data_descriptor.high = sd->address_high;
+		orb->request.data_descriptor.high = lu->tgt->address_high;
 		orb->request.data_descriptor.low  = sg_dma_address(sg);
-		orb->request.misc |=
-			COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
+		orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
 		return 0;
 	}
 
@@ -989,7 +1133,7 @@
 	 * initiator (i.e. us), but data_descriptor can refer to data
 	 * on other nodes so we need to put our ID in descriptor.high.
 	 */
-	orb->request.data_descriptor.high = sd->address_high;
+	orb->request.data_descriptor.high = lu->tgt->address_high;
 	orb->request.data_descriptor.low  = orb->page_table_bus;
 	orb->request.misc |=
 		COMMAND_ORB_PAGE_TABLE_PRESENT |
@@ -1008,12 +1152,11 @@
 
 static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
 {
-	struct sbp2_device *sd =
-		(struct sbp2_device *)cmd->device->host->hostdata;
-	struct fw_unit *unit = sd->unit;
-	struct fw_device *device = fw_device(unit->device.parent);
+	struct sbp2_logical_unit *lu = cmd->device->hostdata;
+	struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
 	struct sbp2_command_orb *orb;
 	unsigned max_payload;
+	int retval = SCSI_MLQUEUE_HOST_BUSY;
 
 	/*
 	 * Bidirectional commands are not yet implemented, and unknown
@@ -1029,14 +1172,14 @@
 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
 	if (orb == NULL) {
 		fw_notify("failed to alloc orb\n");
-		goto fail_alloc;
+		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
 	kref_init(&orb->base.kref);
 
-	orb->unit = unit;
+	orb->lu   = lu;
 	orb->done = done;
 	orb->cmd  = cmd;
 
@@ -1062,8 +1205,8 @@
 		orb->request.misc |=
 			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
 
-	if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0)
-		goto fail_mapping;
+	if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
+		goto out;
 
 	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
 
@@ -1076,49 +1219,47 @@
 		dma_map_single(device->card->device, &orb->request,
 			       sizeof(orb->request), DMA_TO_DEVICE);
 	if (dma_mapping_error(orb->base.request_bus))
-		goto fail_mapping;
+		goto out;
 
-	sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
-		      sd->command_block_agent_address + SBP2_ORB_POINTER);
-
+	sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
+		      lu->command_block_agent_address + SBP2_ORB_POINTER);
+	retval = 0;
+ out:
 	kref_put(&orb->base.kref, free_orb);
-	return 0;
-
- fail_mapping:
-	kref_put(&orb->base.kref, free_orb);
- fail_alloc:
-	return SCSI_MLQUEUE_HOST_BUSY;
+	return retval;
 }
 
 static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
 {
-	struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
+	struct sbp2_logical_unit *lu = sdev->hostdata;
 
 	sdev->allow_restart = 1;
 
-	if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+	if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
 		sdev->inquiry_len = 36;
+
 	return 0;
 }
 
 static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 {
-	struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
-	struct fw_unit *unit = sd->unit;
+	struct sbp2_logical_unit *lu = sdev->hostdata;
 
 	sdev->use_10_for_rw = 1;
 
 	if (sdev->type == TYPE_ROM)
 		sdev->use_10_for_ms = 1;
+
 	if (sdev->type == TYPE_DISK &&
-	    sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+	    lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
 		sdev->skip_ms_page_8 = 1;
-	if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
-		fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
+
+	if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
 		sdev->fix_capacity = 1;
-	}
-	if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
+
+	if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
 		blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
 	return 0;
 }
 
@@ -1128,13 +1269,11 @@
  */
 static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
 {
-	struct sbp2_device *sd =
-		(struct sbp2_device *)cmd->device->host->hostdata;
-	struct fw_unit *unit = sd->unit;
+	struct sbp2_logical_unit *lu = cmd->device->hostdata;
 
 	fw_notify("sbp2_scsi_abort\n");
-	sbp2_agent_reset(unit);
-	sbp2_cancel_orbs(unit);
+	sbp2_agent_reset(lu);
+	sbp2_cancel_orbs(lu);
 
 	return SUCCESS;
 }
@@ -1151,37 +1290,18 @@
 			    char *buf)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
-	struct sbp2_device *sd;
-	struct fw_unit *unit;
+	struct sbp2_logical_unit *lu;
 	struct fw_device *device;
-	u32 directory_id;
-	struct fw_csr_iterator ci;
-	int key, value, lun;
 
 	if (!sdev)
 		return 0;
-	sd = (struct sbp2_device *)sdev->host->hostdata;
-	unit = sd->unit;
-	device = fw_device(unit->device.parent);
 
-	/* implicit directory ID */
-	directory_id = ((unit->directory - device->config_rom) * 4
-			+ CSR_CONFIG_ROM) & 0xffffff;
-
-	/* explicit directory ID, overrides implicit ID if present */
-	fw_csr_iterator_init(&ci, unit->directory);
-	while (fw_csr_iterator_next(&ci, &key, &value))
-		if (key == CSR_DIRECTORY_ID) {
-			directory_id = value;
-			break;
-		}
-
-	/* FIXME: Make this work for multi-lun devices. */
-	lun = 0;
+	lu = sdev->hostdata;
+	device = fw_device(lu->tgt->unit->device.parent);
 
 	return sprintf(buf, "%08x%08x:%06x:%04x\n",
 			device->config_rom[3], device->config_rom[4],
-			directory_id, lun);
+			lu->tgt->directory_id, lu->lun);
 }
 
 static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
@@ -1219,12 +1339,17 @@
 
 static int __init sbp2_init(void)
 {
+	sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
+	if (!sbp2_wq)
+		return -ENOMEM;
+
 	return driver_register(&sbp2_driver.driver);
 }
 
 static void __exit sbp2_cleanup(void)
 {
 	driver_unregister(&sbp2_driver.driver);
+	destroy_workqueue(sbp2_wq);
 }
 
 module_init(sbp2_init);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 39e5cd1..0fc9b00 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -152,6 +152,10 @@
 	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
 }
 
+static inline struct fw_node *fw_node(struct list_head *l)
+{
+	return list_entry(l, struct fw_node, link);
+}
 
 /**
  * build_tree - Build the tree representation of the topology
@@ -162,7 +166,7 @@
  * This function builds the tree representation of the topology given
  * by the self IDs from the latest bus reset.  During the construction
  * of the tree, the function checks that the self IDs are valid and
- * internally consistent.  On succcess this funtions returns the
+ * internally consistent.  On success this function returns the
  * fw_node corresponding to the local card otherwise NULL.
  */
 static struct fw_node *build_tree(struct fw_card *card,
@@ -211,6 +215,10 @@
 		 */
 		for (i = 0, h = &stack; i < child_port_count; i++)
 			h = h->prev;
+		/*
+		 * When the stack is empty, this yields an invalid value,
+		 * but that pointer will never be dereferenced.
+		 */
 		child = fw_node(h);
 
 		node = fw_node_create(q, port_count, card->color);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index 1b56b4a..cedc1ec 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -51,12 +51,6 @@
 };
 
 static inline struct fw_node *
-fw_node(struct list_head *l)
-{
-	return list_entry(l, struct fw_node, link);
-}
-
-static inline struct fw_node *
 fw_node_get(struct fw_node *node)
 {
 	atomic_inc(&node->ref_count);
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 3e1cb12..9959b79 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -410,7 +410,12 @@
  * controller.  When a request is received that falls within the
  * specified address range, the specified callback is invoked.  The
  * parameters passed to the callback give the details of the
- * particular request
+ * particular request.
+ *
+ * Return value:  0 on success, non-zero otherwise.
+ * The start offset of the handler's address region is determined by
+ * fw_core_add_address_handler() and is returned in handler->offset.
+ * The offset is quadlet-aligned.
  */
 int
 fw_core_add_address_handler(struct fw_address_handler *handler,
@@ -422,14 +427,15 @@
 
 	spin_lock_irqsave(&address_handler_lock, flags);
 
-	handler->offset = region->start;
+	handler->offset = roundup(region->start, 4);
 	while (handler->offset + handler->length <= region->end) {
 		other =
 		    lookup_overlapping_address_handler(&address_handler_list,
 						       handler->offset,
 						       handler->length);
 		if (other != NULL) {
-			handler->offset += other->length;
+			handler->offset =
+			    roundup(other->offset + other->length, 4);
 		} else {
 			list_add_tail(&handler->link, &address_handler_list);
 			ret = 0;
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index d08166b..e8122de 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -218,12 +218,10 @@
 	if (!kv)
 		return NULL;
 
+	atomic_set(&kv->refcnt, 1);
 	kv->key.type = type;
 	kv->key.id = key;
-
 	kv->associate = NULL;
-	kv->refcnt = 1;
-
 	kv->next = NULL;
 	kv->prev = NULL;
 	kv->offset = 0;
@@ -326,12 +324,13 @@
 	if (kv->associate)
 		csr1212_release_keyval(kv->associate);
 
-	associate->refcnt++;
+	csr1212_keep_keyval(associate);
 	kv->associate = associate;
 }
 
-int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
-				       struct csr1212_keyval *kv)
+static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
+						struct csr1212_keyval *kv,
+						bool keep_keyval)
 {
 	struct csr1212_dentry *dentry;
 
@@ -341,10 +340,10 @@
 	if (!dentry)
 		return -ENOMEM;
 
+	if (keep_keyval)
+		csr1212_keep_keyval(kv);
 	dentry->kv = kv;
 
-	kv->refcnt++;
-
 	dentry->next = NULL;
 	dentry->prev = dir->value.directory.dentries_tail;
 
@@ -358,6 +357,12 @@
 	return CSR1212_SUCCESS;
 }
 
+int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
+				       struct csr1212_keyval *kv)
+{
+	return __csr1212_attach_keyval_to_directory(dir, kv, true);
+}
+
 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
 	(&((kv)->value.leaf.data[1]))
 
@@ -483,15 +488,18 @@
 
 /* This function is used to free the memory taken by a keyval.  If the given
  * keyval is a directory type, then any keyvals contained in that directory
- * will be destroyed as well if their respective refcnts are 0.  By means of
+ * will be destroyed as well if no one holds a reference on them.  By means of
  * list manipulation, this routine will descend a directory structure in a
  * non-recursive manner. */
-static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
+void csr1212_release_keyval(struct csr1212_keyval *kv)
 {
 	struct csr1212_keyval *k, *a;
 	struct csr1212_dentry dentry;
 	struct csr1212_dentry *head, *tail;
 
+	if (!atomic_dec_and_test(&kv->refcnt))
+		return;
+
 	dentry.kv = kv;
 	dentry.next = NULL;
 	dentry.prev = NULL;
@@ -503,9 +511,8 @@
 		k = head->kv;
 
 		while (k) {
-			k->refcnt--;
-
-			if (k->refcnt > 0)
+			/* must not dec_and_test kv->refcnt again */
+			if (k != kv && !atomic_dec_and_test(&k->refcnt))
 				break;
 
 			a = k->associate;
@@ -536,14 +543,6 @@
 	}
 }
 
-void csr1212_release_keyval(struct csr1212_keyval *kv)
-{
-	if (kv->refcnt > 1)
-		kv->refcnt--;
-	else
-		csr1212_destroy_keyval(kv);
-}
-
 void csr1212_destroy_csr(struct csr1212_csr *csr)
 {
 	struct csr1212_csr_rom_cache *c, *oc;
@@ -1126,6 +1125,7 @@
 	int ret = CSR1212_SUCCESS;
 	struct csr1212_keyval *k = NULL;
 	u32 offset;
+	bool keep_keyval = true;
 
 	switch (CSR1212_KV_KEY_TYPE(ki)) {
 	case CSR1212_KV_TYPE_IMMEDIATE:
@@ -1135,8 +1135,8 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-
-		k->refcnt = 0;	/* Don't keep local reference when parsing. */
+		/* Don't keep local reference when parsing. */
+		keep_keyval = false;
 		break;
 
 	case CSR1212_KV_TYPE_CSR_OFFSET:
@@ -1146,7 +1146,8 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-		k->refcnt = 0;	/* Don't keep local reference when parsing. */
+		/* Don't keep local reference when parsing. */
+		keep_keyval = false;
 		break;
 
 	default:
@@ -1174,8 +1175,10 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-		k->refcnt = 0;	/* Don't keep local reference when parsing. */
-		k->valid = 0;	/* Contents not read yet so it's not valid. */
+		/* Don't keep local reference when parsing. */
+		keep_keyval = false;
+		/* Contents not read yet so it's not valid. */
+		k->valid = 0;
 		k->offset = offset;
 
 		k->prev = dir;
@@ -1183,7 +1186,7 @@
 		dir->next->prev = k;
 		dir->next = k;
 	}
-	ret = csr1212_attach_keyval_to_directory(dir, k);
+	ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
 out:
 	if (ret != CSR1212_SUCCESS && k != NULL)
 		free_keyval(k);
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index df909ce..043039f 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <asm/atomic.h>
 
 #define CSR1212_MALLOC(size)	kmalloc((size), GFP_KERNEL)
 #define CSR1212_FREE(ptr)	kfree(ptr)
@@ -149,7 +150,7 @@
 		struct csr1212_directory directory;
 	} value;
 	struct csr1212_keyval *associate;
-	int refcnt;
+	atomic_t refcnt;
 
 	/* used in generating and/or parsing CSR image */
 	struct csr1212_keyval *next, *prev;	/* flat list of CSR elements */
@@ -350,7 +351,8 @@
  * need for code to retain a keyval that has been parsed. */
 static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
 {
-	kv->refcnt++;
+	atomic_inc(&kv->refcnt);
+	smp_mb__after_atomic_inc();
 }
 
 
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index dc9dce2..b166b35 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1153,8 +1153,6 @@
 			pdg->sz++;
 			lh = find_partial_datagram(pdgl, dgl);
 		} else {
-			struct partial_datagram *pd;
-
 			pd = list_entry(lh, struct partial_datagram, list);
 
 			if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
@@ -1222,23 +1220,19 @@
 		priv->stats.rx_errors++;
 		priv->stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
-		goto bad_proto;
-	}
-
-	if (netif_rx(skb) == NET_RX_DROP) {
+	} else if (netif_rx(skb) == NET_RX_DROP) {
 		priv->stats.rx_errors++;
 		priv->stats.rx_dropped++;
-		goto bad_proto;
+	} else {
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += skb->len;
 	}
 
-	/* Statistics */
-	priv->stats.rx_packets++;
-	priv->stats.rx_bytes += skb->len;
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 bad_proto:
 	if (netif_queue_stopped(dev))
 		netif_wake_queue(dev);
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	dev->last_rx = jiffies;
 
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 98fd985..36c747b 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -488,7 +488,7 @@
 	highlevel_host_reset(host);
 }
 
-static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pending_packets_lock);
 
 /**
  * hpsb_packet_sent - notify core of sending a packet
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 1939fee..90dc75b 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1014,13 +1014,13 @@
 			    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
 				switch (last_key_id) {
 				case CSR1212_KV_ID_VENDOR:
-					ud->vendor_name_kv = kv;
 					csr1212_keep_keyval(kv);
+					ud->vendor_name_kv = kv;
 					break;
 
 				case CSR1212_KV_ID_MODEL:
-					ud->model_name_kv = kv;
 					csr1212_keep_keyval(kv);
+					ud->model_name_kv = kv;
 					break;
 
 				}
@@ -1112,7 +1112,7 @@
 {
 	unsigned int ud_id = 0;
 	struct csr1212_dentry *dentry;
-	struct csr1212_keyval *kv;
+	struct csr1212_keyval *kv, *vendor_name_kv = NULL;
 	u8 last_key_id = 0;
 
 	ne->needs_probe = 0;
@@ -1139,8 +1139,8 @@
 				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
 				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
 				    CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
-					ne->vendor_name_kv = kv;
 					csr1212_keep_keyval(kv);
+					vendor_name_kv = kv;
 				}
 			}
 			break;
@@ -1149,10 +1149,13 @@
 	}
 
 	if (ne->vendor_name_kv) {
-		int error = device_create_file(&ne->device,
-					       &dev_attr_ne_vendor_name_kv);
-
-		if (error && error != -EEXIST)
+		kv = ne->vendor_name_kv;
+		ne->vendor_name_kv = vendor_name_kv;
+		csr1212_release_keyval(kv);
+	} else if (vendor_name_kv) {
+		ne->vendor_name_kv = vendor_name_kv;
+		if (device_create_file(&ne->device,
+				       &dev_attr_ne_vendor_name_kv) != 0)
 			HPSB_ERR("Failed to add sysfs attribute");
 	}
 }
@@ -1712,7 +1715,8 @@
 		 * to make sure things settle down. */
 		g = get_hpsb_generation(host);
 		for (i = 0; i < 4 ; i++) {
-			if (msleep_interruptible(63) || kthread_should_stop())
+			msleep_interruptible(63);
+			if (kthread_should_stop())
 				goto exit;
 
 			/* Now get the generation in which the node ID's we collect
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index d1a5bcd..8af01ab 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -121,16 +121,6 @@
 	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
 }
 
-static int bit_reg(struct i2c_client *client)
-{
-	return 0;
-}
-
-static int bit_unreg(struct i2c_client *client)
-{
-	return 0;
-}
-
 static struct i2c_algo_bit_data bit_data = {
 	.setsda			= bit_setsda,
 	.setscl			= bit_setscl,
@@ -140,14 +130,6 @@
 	.timeout		= 100,
 };
 
-static struct i2c_adapter bit_ops = {
-	.id 			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
-	.client_register	= bit_reg,
-	.client_unregister	= bit_unreg,
-	.name			= "PCILynx I2C",
-};
-
-
 
 /*
  * PCL handling functions.
@@ -765,7 +747,6 @@
                 } else {
                         struct ti_pcl pcl;
                         u32 ack;
-                        struct hpsb_packet *packet;
 
                         PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
 
@@ -1436,9 +1417,11 @@
         	struct i2c_algo_bit_data i2c_adapter_data;
 
         	error = -ENOMEM;
-		i2c_ad = kmemdup(&bit_ops, sizeof(*i2c_ad), GFP_KERNEL);
+		i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
         	if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 
+		i2c_ad->id = I2C_HW_B_PCILYNX;
+		strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
                 i2c_adapter_data = bit_data;
                 i2c_ad->algo_data = &i2c_adapter_data;
                 i2c_adapter_data.data = lynx;
@@ -1465,13 +1448,11 @@
                                                   { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
                                                 };
 
-                        /* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
-                           do it more efficiently in one transaction rather then using several reads */
+			/* we use i2c_transfer because we have no i2c_client
+			   at hand */
                         if (i2c_transfer(i2c_ad, msg, 2) < 0) {
                                 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
                         } else {
-                                int i;
-
                                 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
 				/* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
 				 * generation(1394a) and link_spd(1394a) field and recalculate
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a81ba8f..1b353b9 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -242,6 +242,8 @@
 
 static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
 
+static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
+
 static struct hpsb_highlevel sbp2_highlevel = {
 	.name		= SBP2_DEVICE_NAME,
 	.host_reset	= sbp2_host_reset,
@@ -732,6 +734,7 @@
 	struct sbp2_fwhost_info *hi;
 	struct Scsi_Host *shost = NULL;
 	struct sbp2_lu *lu = NULL;
+	unsigned long flags;
 
 	lu = kzalloc(sizeof(*lu), GFP_KERNEL);
 	if (!lu) {
@@ -784,7 +787,9 @@
 
 	lu->hi = hi;
 
+	write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
 	list_add_tail(&lu->lu_list, &hi->logical_units);
+	write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
 
 	/* Register the status FIFO address range. We could use the same FIFO
 	 * for targets at different nodes. However we need different FIFOs per
@@ -828,16 +833,20 @@
 {
 	struct sbp2_fwhost_info *hi;
 	struct sbp2_lu *lu;
+	unsigned long flags;
 
 	hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
 	if (!hi)
 		return;
+
+	read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
 	list_for_each_entry(lu, &hi->logical_units, lu_list)
 		if (likely(atomic_read(&lu->state) !=
 			   SBP2LU_STATE_IN_SHUTDOWN)) {
 			atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
 			scsi_block_requests(lu->shost);
 		}
+	read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
 }
 
 static int sbp2_start_device(struct sbp2_lu *lu)
@@ -919,6 +928,7 @@
 static void sbp2_remove_device(struct sbp2_lu *lu)
 {
 	struct sbp2_fwhost_info *hi;
+	unsigned long flags;
 
 	if (!lu)
 		return;
@@ -933,7 +943,9 @@
 	flush_scheduled_work();
 	sbp2util_remove_command_orb_pool(lu, hi->host);
 
+	write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
 	list_del(&lu->lu_list);
+	write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
 
 	if (lu->login_response)
 		dma_free_coherent(hi->host->device.parent,
@@ -1707,6 +1719,7 @@
 	}
 
 	/* Find the unit which wrote the status. */
+	read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
 	list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
 		if (lu_tmp->ne->nodeid == nodeid &&
 		    lu_tmp->status_fifo_addr == addr) {
@@ -1714,6 +1727,8 @@
 			break;
 		}
 	}
+	read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
+
 	if (unlikely(!lu)) {
 		SBP2_ERR("lu is NULL - device is gone?");
 		return RCODE_ADDRESS_ERROR;
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index ce22bf5..f99cb77 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2225,8 +2225,9 @@
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_ZORRO
-static void __devexit cirrusfb_zorro_unmap(struct cirrusfb_info *cinfo)
+static void __devexit cirrusfb_zorro_unmap(struct fb_info *info)
 {
+	struct cirrusfb_info *cinfo = info->par;
 	zorro_release_device(cinfo->zdev);
 
 	if (cinfo->btype == BT_PICASSO4) {
@@ -2573,7 +2574,7 @@
 	printk(KERN_INFO "Cirrus Logic chipset on Zorro bus\n");
 	zorro_set_drvdata(z, info);
 
-	ret = cirrusfb_register(cinfo);
+	ret = cirrusfb_register(info);
 	if (ret) {
 		if (btype == BT_PICASSO4) {
 			iounmap(info->screen_base);
diff --git a/fs/open.c b/fs/open.c
index 1d9e5e9..044bfa8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -757,6 +757,10 @@
 	f->f_op = fops_get(inode->i_fop);
 	file_move(f, &inode->i_sb->s_files);
 
+	error = security_dentry_open(f);
+	if (error)
+		goto cleanup_all;
+
 	if (!open && f->f_op)
 		open = f->f_op->open;
 	if (open) {
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 2af321f..65be95d 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,6 +6,149 @@
  */
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <asm/io.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * DMA-consistent mapping functions for PowerPCs that don't support
+ * cache snooping.  These allocate/free a region of uncached mapped
+ * memory space for use with DMA devices.  Alternatively, you could
+ * allocate the space "normally" and use the cache management functions
+ * to ensure it is consistent.
+ */
+extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void __dma_free_coherent(size_t size, void *vaddr);
+extern void __dma_sync(void *vaddr, size_t size, int direction);
+extern void __dma_sync_page(struct page *page, unsigned long offset,
+				 size_t size, int direction);
+
+#else /* ! CONFIG_NOT_COHERENT_CACHE */
+/*
+ * Cache coherent cores.
+ */
+
+#define __dma_alloc_coherent(gfp, size, handle)	NULL
+#define __dma_free_coherent(size, addr)		((void)0)
+#define __dma_sync(addr, size, rw)		((void)0)
+#define __dma_sync_page(pg, off, sz, rw)	((void)0)
+
+#endif /* ! CONFIG_NOT_COHERENT_CACHE */
+
+#ifdef CONFIG_PPC64
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
+};
+
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+	/* We don't handle the NULL dev case for ISA for now. We could
+	 * do it via an out of line call but it is not needed for now. The
+	 * only ISA DMA device we support is the floppy and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+		return NULL;
+	return dev->archdata.dma_ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return 0;
+	if (dma_ops->dma_supported == NULL)
+		return 1;
+	return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	if (unlikely(dma_ops == NULL))
+		return -EIO;
+	if (dma_ops->set_dma_mask != NULL)
+		return dma_ops->set_dma_mask(dev, dma_mask);
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, cpu_addr, size, direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->unmap_single(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_single(dev, page_address(page) + offset, size,
+			direction);
+}
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 				  size_t size,
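
The struct dma_mapping_ops indirection above lets each bus or platform back end supply its own mapping routines via dev->archdata.dma_ops. Below is a minimal sketch of how such a back end might be wired up; it is not part of this merge, and the names example_map_single, example_dma_ops and example_setup_device are invented for illustration.

static dma_addr_t example_map_single(struct device *dev, void *ptr,
				     size_t size,
				     enum dma_data_direction direction)
{
	/* direct mapping: the CPU physical address doubles as the bus address */
	return (dma_addr_t)__pa(ptr);
}

static struct dma_mapping_ops example_dma_ops = {
	.map_single	= example_map_single,
	/* a real back end also fills in alloc_coherent, free_coherent,
	 * unmap_single, map_sg, unmap_sg and (optionally) dma_supported */
};

static void example_setup_device(struct device *dev)
{
	/* after this, dma_map_single() etc. dispatch through get_dma_ops() */
	dev->archdata.dma_ops = &example_dma_ops;
}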
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 1a45d6f..0f0e271 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -178,6 +178,7 @@
 #define FW_CDEV_IOC_QUEUE_ISO		_IOWR('#', 0x09, struct fw_cdev_queue_iso)
 #define FW_CDEV_IOC_START_ISO		_IOW('#', 0x0a, struct fw_cdev_start_iso)
 #define FW_CDEV_IOC_STOP_ISO		_IOW('#', 0x0b, struct fw_cdev_stop_iso)
+#define FW_CDEV_IOC_GET_CYCLE_TIMER	_IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
 
 /* FW_CDEV_VERSION History
  *
@@ -459,4 +460,18 @@
 	__u32 handle;
 };
 
+/**
+ * struct fw_cdev_get_cycle_timer - read cycle timer register
+ * @local_time:   system time, in microseconds since the Epoch
+ * @cycle_timer:  isochronous cycle timer, as per OHCI 1.1 clause 5.13
+ *
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
+ * and also the system clock.  This allows the receive time of an isochronous
+ * packet to be expressed as a system time with microsecond accuracy.
+ */
+struct fw_cdev_get_cycle_timer {
+	__u64 local_time;
+	__u32 cycle_timer;
+};
+
 #endif /* _LINUX_FIREWIRE_CDEV_H */
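
For illustration only (not part of this series): a userspace client could exercise the new FW_CDEV_IOC_GET_CYCLE_TIMER ioctl roughly as follows. The /dev/fw0 device node is an assumption and error handling is minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_get_cycle_timer ctr;
	int fd = open("/dev/fw0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) < 0) {
		close(fd);
		return 1;
	}
	printf("local_time=%llu us, cycle_timer=0x%08x\n",
	       (unsigned long long)ctr.local_time, ctr.cycle_timer);
	close(fd);
	return 0;
}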
diff --git a/include/linux/security.h b/include/linux/security.h
index 1a15526e..928d479 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -504,6 +504,13 @@
  *	@file contains the file structure being received.
  *	Return 0 if permission is granted.
  *
+ * Security hook for dentry
+ *
+ * @dentry_open
+ *	Save open-time permission checking state for later use upon
+ *	file_permission, and recheck access if anything has changed
+ *	since inode_permission.
+ *
  * Security hooks for task operations.
  *
  * @task_create:
@@ -1256,6 +1263,7 @@
 	int (*file_send_sigiotask) (struct task_struct * tsk,
 				    struct fown_struct * fown, int sig);
 	int (*file_receive) (struct file * file);
+	int (*dentry_open)  (struct file *file);
 
 	int (*task_create) (unsigned long clone_flags);
 	int (*task_alloc_security) (struct task_struct * p);
@@ -1864,6 +1872,11 @@
 	return security_ops->file_receive (file);
 }
 
+static inline int security_dentry_open (struct file *file)
+{
+	return security_ops->dentry_open (file);
+}
+
 static inline int security_task_create (unsigned long clone_flags)
 {
 	return security_ops->task_create (clone_flags);
@@ -2546,6 +2559,11 @@
 	return 0;
 }
 
+static inline int security_dentry_open (struct file *file)
+{
+	return 0;
+}
+
 static inline int security_task_create (unsigned long clone_flags)
 {
 	return 0;
diff --git a/security/dummy.c b/security/dummy.c
index 853ec22..64b647a 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -463,6 +463,11 @@
 	return 0;
 }
 
+static int dummy_dentry_open (struct file *file)
+{
+	return 0;
+}
+
 static int dummy_task_create (unsigned long clone_flags)
 {
 	return 0;
@@ -1033,6 +1038,7 @@
 	set_to_dummy_if_null(ops, file_set_fowner);
 	set_to_dummy_if_null(ops, file_send_sigiotask);
 	set_to_dummy_if_null(ops, file_receive);
+	set_to_dummy_if_null(ops, dentry_open);
 	set_to_dummy_if_null(ops, task_create);
 	set_to_dummy_if_null(ops, task_alloc_security);
 	set_to_dummy_if_null(ops, task_free_security);
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 0e69adf..81b3dff 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -916,3 +916,8 @@
 	avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
 	return rc;
 }
+
+u32 avc_policy_seqno(void)
+{
+	return avc_cache.latest_notif;
+}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index cf76150..97b7e27 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -14,6 +14,8 @@
  *                          <dgoeddel@trustedcs.com>
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *                     Paul Moore, <paul.moore@hp.com>
+ *  Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
+ *                     Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License version 2,
@@ -2464,7 +2466,7 @@
 
 /* file security operations */
 
-static int selinux_file_permission(struct file *file, int mask)
+static int selinux_revalidate_file_permission(struct file *file, int mask)
 {
 	int rc;
 	struct inode *inode = file->f_path.dentry->d_inode;
@@ -2486,6 +2488,25 @@
 	return selinux_netlbl_inode_permission(inode, mask);
 }
 
+static int selinux_file_permission(struct file *file, int mask)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct task_security_struct *tsec = current->security;
+	struct file_security_struct *fsec = file->f_security;
+	struct inode_security_struct *isec = inode->i_security;
+
+	if (!mask) {
+		/* No permission to check.  Existence test. */
+		return 0;
+	}
+
+	if (tsec->sid == fsec->sid && fsec->isid == isec->sid
+	    && fsec->pseqno == avc_policy_seqno())
+		return selinux_netlbl_inode_permission(inode, mask);
+
+	return selinux_revalidate_file_permission(file, mask);
+}
+
 static int selinux_file_alloc_security(struct file *file)
 {
 	return file_alloc_security(file);
@@ -2725,6 +2746,34 @@
 	return file_has_perm(current, file, file_to_av(file));
 }
 
+static int selinux_dentry_open(struct file *file)
+{
+	struct file_security_struct *fsec;
+	struct inode *inode;
+	struct inode_security_struct *isec;
+	inode = file->f_path.dentry->d_inode;
+	fsec = file->f_security;
+	isec = inode->i_security;
+	/*
+	 * Save inode label and policy sequence number
+	 * at open-time so that selinux_file_permission
+	 * can determine whether revalidation is necessary.
+	 * Task label is already saved in the file security
+	 * struct as its SID.
+	 */
+	fsec->isid = isec->sid;
+	fsec->pseqno = avc_policy_seqno();
+	/*
+	 * Since the inode label or policy seqno may have changed
+	 * between the selinux_inode_permission check and the saving
+	 * of state above, recheck that access is still permitted.
+	 * Otherwise, access might never be revalidated against the
+	 * new inode label or new policy.
+	 * This check is not redundant - do not remove.
+	 */
+	return inode_has_perm(current, inode, file_to_av(file), NULL);
+}
+
 /* task security operations */
 
 static int selinux_task_create(unsigned long clone_flags)
@@ -4794,6 +4843,8 @@
 	.file_send_sigiotask =		selinux_file_send_sigiotask,
 	.file_receive =			selinux_file_receive,
 
+	.dentry_open =                  selinux_dentry_open,
+
 	.task_create =			selinux_task_create,
 	.task_alloc_security =		selinux_task_alloc_security,
 	.task_free_security =		selinux_task_free_security,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index e145f6e..553607a 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -112,6 +112,8 @@
                  u16 tclass, u32 requested,
                  struct avc_audit_data *auditdata);
 
+u32 avc_policy_seqno(void);
+
 #define AVC_CALLBACK_GRANT		1
 #define AVC_CALLBACK_TRY_REVOKE		2
 #define AVC_CALLBACK_REVOKE		4
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 91b88f0..642a9fd 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -53,6 +53,8 @@
 	struct file *file;              /* back pointer to file object */
 	u32 sid;              /* SID of open file description */
 	u32 fown_sid;         /* SID of file owner (for SIGIO) */
+	u32 isid;             /* SID of inode at the time of file open */
+	u32 pseqno;           /* Policy seqno at the time of file open */
 };
 
 struct superblock_security_struct {
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 83bdd4d..39337af 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -90,6 +90,8 @@
 
 int security_get_classes(char ***classes, int *nclasses);
 int security_get_permissions(char *class, char ***perms, int *nperms);
+int security_get_reject_unknown(void);
+int security_get_allow_unknown(void);
 
 #define SECURITY_FS_USE_XATTR		1 /* use xattr */
 #define SECURITY_FS_USE_TRANS		2 /* use transition SIDs, e.g. devpts/tmpfs */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index c9e92da..f5f3e6d 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -103,6 +103,8 @@
 	SEL_MEMBER,	/* compute polyinstantiation membership decision */
 	SEL_CHECKREQPROT, /* check requested protection, not kernel-applied one */
 	SEL_COMPAT_NET,	/* whether to use old compat network packet controls */
+	SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
+	SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
 	SEL_INO_NEXT,	/* The next inode number to use */
 };
 
@@ -177,6 +179,23 @@
 	.write		= sel_write_enforce,
 };
 
+static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	char tmpbuf[TMPBUFLEN];
+	ssize_t length;
+	ino_t ino = filp->f_path.dentry->d_inode->i_ino;
+	int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
+		security_get_reject_unknown() : !security_get_allow_unknown();
+
+	length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown);
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations sel_handle_unknown_ops = {
+	.read		= sel_read_handle_unknown,
+};
+
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 static ssize_t sel_write_disable(struct file * file, const char __user * buf,
 				 size_t count, loff_t *ppos)
@@ -309,6 +328,11 @@
 		length = count;
 
 out1:
+
+	printk(KERN_INFO "SELinux: policy loaded with handle_unknown=%s\n",
+	       (security_get_reject_unknown() ? "reject" :
+		(security_get_allow_unknown() ? "allow" : "deny")));
+
 	audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
 		"policy loaded auid=%u",
 		audit_get_loginuid(current->audit_context));
@@ -1575,6 +1599,8 @@
 		[SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO},
 		[SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
 		[SEL_COMPAT_NET] = {"compat_net", &sel_compat_net_ops, S_IRUGO|S_IWUSR},
+		[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
+		[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
 		/* last one */ {""}
 	};
 	ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
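
As a usage illustration only: the new reject_unknown and deny_unknown nodes are read-only selinuxfs files, each returning a single decimal flag. Assuming the conventional /selinux mount point, they can be read like this.

#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/selinux/deny_unknown", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("deny_unknown=%s\n", buf);	/* "0" or "1" */
	if (f)
		fclose(f);
	return 0;
}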
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 85705eb..7551af1 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -12,24 +12,25 @@
  *	This program is free software; you can redistribute it and/or modify
  *  	it under the terms of the GNU General Public License as published by
  *	the Free Software Foundation, version 2.
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * 	Tuned number of hash slots for avtab to reduce memory usage
  */
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
 #include <linux/errno.h>
-
 #include "avtab.h"
 #include "policydb.h"
 
-#define AVTAB_HASH(keyp) \
-((keyp->target_class + \
- (keyp->target_type << 2) + \
- (keyp->source_type << 9)) & \
- AVTAB_HASH_MASK)
-
 static struct kmem_cache *avtab_node_cachep;
 
+static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
+{
+	return ((keyp->target_class + (keyp->target_type << 2) +
+		 (keyp->source_type << 9)) & mask);
+}
+
 static struct avtab_node*
 avtab_insert_node(struct avtab *h, int hvalue,
 		  struct avtab_node * prev, struct avtab_node * cur,
@@ -59,10 +60,10 @@
 	struct avtab_node *prev, *cur, *newnode;
 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-	if (!h)
+	if (!h || !h->htable)
 		return -EINVAL;
 
-	hvalue = AVTAB_HASH(key);
+	hvalue = avtab_hash(key, h->mask);
 	for (prev = NULL, cur = h->htable[hvalue];
 	     cur;
 	     prev = cur, cur = cur->next) {
@@ -100,9 +101,9 @@
 	struct avtab_node *prev, *cur, *newnode;
 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-	if (!h)
+	if (!h || !h->htable)
 		return NULL;
-	hvalue = AVTAB_HASH(key);
+	hvalue = avtab_hash(key, h->mask);
 	for (prev = NULL, cur = h->htable[hvalue];
 	     cur;
 	     prev = cur, cur = cur->next) {
@@ -132,10 +133,10 @@
 	struct avtab_node *cur;
 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-	if (!h)
+	if (!h || !h->htable)
 		return NULL;
 
-	hvalue = AVTAB_HASH(key);
+	hvalue = avtab_hash(key, h->mask);
 	for (cur = h->htable[hvalue]; cur; cur = cur->next) {
 		if (key->source_type == cur->key.source_type &&
 		    key->target_type == cur->key.target_type &&
@@ -167,10 +168,10 @@
 	struct avtab_node *cur;
 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-	if (!h)
+	if (!h || !h->htable)
 		return NULL;
 
-	hvalue = AVTAB_HASH(key);
+	hvalue = avtab_hash(key, h->mask);
 	for (cur = h->htable[hvalue]; cur; cur = cur->next) {
 		if (key->source_type == cur->key.source_type &&
 		    key->target_type == cur->key.target_type &&
@@ -228,7 +229,7 @@
 	if (!h || !h->htable)
 		return;
 
-	for (i = 0; i < AVTAB_SIZE; i++) {
+	for (i = 0; i < h->nslot; i++) {
 		cur = h->htable[i];
 		while (cur != NULL) {
 			temp = cur;
@@ -237,32 +238,63 @@
 		}
 		h->htable[i] = NULL;
 	}
-	vfree(h->htable);
+	kfree(h->htable);
 	h->htable = NULL;
+	h->nslot = 0;
+	h->mask = 0;
 }
 
-
 int avtab_init(struct avtab *h)
 {
-	int i;
+	h->htable = NULL;
+	h->nel = 0;
+	return 0;
+}
 
-	h->htable = vmalloc(sizeof(*(h->htable)) * AVTAB_SIZE);
+int avtab_alloc(struct avtab *h, u32 nrules)
+{
+	u16 mask = 0;
+	u32 shift = 0;
+	u32 work = nrules;
+	u32 nslot = 0;
+
+	if (nrules == 0)
+		goto avtab_alloc_out;
+
+	while (work) {
+		work  = work >> 1;
+		shift++;
+	}
+	if (shift > 2)
+		shift = shift - 2;
+	nslot = 1 << shift;
+	if (nslot > MAX_AVTAB_SIZE)
+		nslot = MAX_AVTAB_SIZE;
+	mask = nslot - 1;
+
+	h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
 	if (!h->htable)
 		return -ENOMEM;
-	for (i = 0; i < AVTAB_SIZE; i++)
-		h->htable[i] = NULL;
+
+ avtab_alloc_out:
 	h->nel = 0;
+	h->nslot = nslot;
+	h->mask = mask;
+	printk(KERN_DEBUG "SELinux: %d avtab hash slots allocated. "
+	       "Num of rules: %d\n", h->nslot, nrules);
 	return 0;
 }
 
 void avtab_hash_eval(struct avtab *h, char *tag)
 {
 	int i, chain_len, slots_used, max_chain_len;
+	unsigned long long chain2_len_sum;
 	struct avtab_node *cur;
 
 	slots_used = 0;
 	max_chain_len = 0;
-	for (i = 0; i < AVTAB_SIZE; i++) {
+	chain2_len_sum = 0;
+	for (i = 0; i < h->nslot; i++) {
 		cur = h->htable[i];
 		if (cur) {
 			slots_used++;
@@ -274,12 +306,14 @@
 
 			if (chain_len > max_chain_len)
 				max_chain_len = chain_len;
+			chain2_len_sum += chain_len * chain_len;
 		}
 	}
 
 	printk(KERN_DEBUG "%s:  %d entries and %d/%d buckets used, longest "
-	       "chain length %d\n", tag, h->nel, slots_used, AVTAB_SIZE,
-	       max_chain_len);
+	       "chain length %d sum of chain length^2 %Lu\n",
+	       tag, h->nel, slots_used, h->nslot, max_chain_len,
+	       chain2_len_sum);
 }
 
 static uint16_t spec_order[] = {
@@ -419,6 +453,11 @@
 		rc = -EINVAL;
 		goto bad;
 	}
+
+	rc = avtab_alloc(a, nel);
+	if (rc)
+		goto bad;
+
 	for (i = 0; i < nel; i++) {
 		rc = avtab_read_item(fp,vers, a, avtab_insertf, NULL);
 		if (rc) {
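
Illustrative arithmetic for avtab_alloc() above: with nrules = 10000 the while loop counts 14 significant bits, shift becomes 12, so nslot = 4096 and mask = 0x0fff. In general the table gets between one quarter and one half of nrules slots, rounded to a power of two and capped at MAX_AVTAB_SIZE (8192), instead of the fixed 32768 buckets allocated before this change.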
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 0a90d93..d8edf8c 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -16,6 +16,9 @@
  *	This program is free software; you can redistribute it and/or modify
  *  	it under the terms of the GNU General Public License as published by
  *	the Free Software Foundation, version 2.
+ *
+ * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * 	Tuned number of hash slots for avtab to reduce memory usage
  */
 #ifndef _SS_AVTAB_H_
 #define _SS_AVTAB_H_
@@ -50,9 +53,13 @@
 struct avtab {
 	struct avtab_node **htable;
 	u32 nel;	/* number of elements */
+	u32 nslot;      /* number of hash slots */
+	u16 mask;       /* mask to compute hash func */
+
 };
 
 int avtab_init(struct avtab *);
+int avtab_alloc(struct avtab *, u32);
 struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
 void avtab_destroy(struct avtab *h);
 void avtab_hash_eval(struct avtab *h, char *tag);
@@ -74,11 +81,10 @@
 void avtab_cache_init(void);
 void avtab_cache_destroy(void);
 
-#define AVTAB_HASH_BITS 15
-#define AVTAB_HASH_BUCKETS (1 << AVTAB_HASH_BITS)
-#define AVTAB_HASH_MASK (AVTAB_HASH_BUCKETS-1)
-
-#define AVTAB_SIZE AVTAB_HASH_BUCKETS
+#define MAX_AVTAB_HASH_BITS 13
+#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
+#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
+#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
 
 #endif	/* _SS_AVTAB_H_ */
 
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index d2737ed..45b93a8 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -456,6 +456,10 @@
 
 	len = le32_to_cpu(buf[0]);
 
+	rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
+	if (rc)
+		goto err;
+
 	for (i = 0; i < len; i++) {
 		node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
 		if (!node)
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index ce492a6..c1a6b22 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -10,6 +10,10 @@
  *
  * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
  */
+/*
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *      Applied standard bit operations to improve bitmap scanning.
+ */
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -29,7 +33,7 @@
 	n2 = e2->node;
 	while (n1 && n2 &&
 	       (n1->startbit == n2->startbit) &&
-	       (n1->map == n2->map)) {
+	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
 		n1 = n1->next;
 		n2 = n2->next;
 	}
@@ -54,7 +58,7 @@
 			return -ENOMEM;
 		}
 		new->startbit = n->startbit;
-		new->map = n->map;
+		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
 		new->next = NULL;
 		if (prev)
 			prev->next = new;
@@ -84,13 +88,15 @@
 {
 	struct ebitmap_node *e_iter = ebmap->node;
 	struct netlbl_lsm_secattr_catmap *c_iter;
-	u32 cmap_idx;
+	u32 cmap_idx, cmap_sft;
+	int i;
 
-	/* This function is a much simpler because SELinux's MAPTYPE happens
-	 * to be the same as NetLabel's NETLBL_CATMAP_MAPTYPE, if MAPTYPE is
-	 * changed from a u64 this function will most likely need to be changed
-	 * as well.  It's not ideal but I think the tradeoff in terms of
-	 * neatness and speed is worth it. */
+	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
+	 * which is not always compatible with an array of unsigned long
+	 * in ebitmap_node.
+	 * In addition, note that the following implementation assumes
+	 * unsigned long is no wider than 64 bits.
+	 */
 
 	if (e_iter == NULL) {
 		*catmap = NULL;
@@ -104,19 +110,27 @@
 	c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);
 
 	while (e_iter != NULL) {
-		if (e_iter->startbit >=
-		    (c_iter->startbit + NETLBL_CATMAP_SIZE)) {
-			c_iter->next = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
-			if (c_iter->next == NULL)
-				goto netlbl_export_failure;
-			c_iter = c_iter->next;
-			c_iter->startbit = e_iter->startbit &
-				           ~(NETLBL_CATMAP_SIZE - 1);
+		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+			unsigned int delta, e_startbit, c_endbit;
+
+			e_startbit = e_iter->startbit + i * EBITMAP_UNIT_SIZE;
+			c_endbit = c_iter->startbit + NETLBL_CATMAP_SIZE;
+			if (e_startbit >= c_endbit) {
+				c_iter->next
+				  = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+				if (c_iter->next == NULL)
+					goto netlbl_export_failure;
+				c_iter = c_iter->next;
+				c_iter->startbit
+				  = e_startbit & ~(NETLBL_CATMAP_SIZE - 1);
+			}
+			delta = e_startbit - c_iter->startbit;
+			cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
+			cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
+			c_iter->bitmap[cmap_idx]
+				|= e_iter->maps[cmap_idx] << cmap_sft;
+			e_iter = e_iter->next;
 		}
-		cmap_idx = (e_iter->startbit - c_iter->startbit) /
-			   NETLBL_CATMAP_MAPSIZE;
-		c_iter->bitmap[cmap_idx] = e_iter->map;
-		e_iter = e_iter->next;
 	}
 
 	return 0;
@@ -128,7 +142,7 @@
 
 /**
  * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
- * @ebmap: the ebitmap to export
+ * @ebmap: the ebitmap to import
  * @catmap: the NetLabel category bitmap
  *
  * Description:
@@ -142,36 +156,50 @@
 	struct ebitmap_node *e_iter = NULL;
 	struct ebitmap_node *emap_prev = NULL;
 	struct netlbl_lsm_secattr_catmap *c_iter = catmap;
-	u32 c_idx;
+	u32 c_idx, c_pos, e_idx, e_sft;
 
-	/* This function is a much simpler because SELinux's MAPTYPE happens
-	 * to be the same as NetLabel's NETLBL_CATMAP_MAPTYPE, if MAPTYPE is
-	 * changed from a u64 this function will most likely need to be changed
-	 * as well.  It's not ideal but I think the tradeoff in terms of
-	 * neatness and speed is worth it. */
+	/* NetLabel's NETLBL_CATMAP_MAPTYPE is defined as an array of u64,
+	 * which is not always compatible with an array of unsigned long
+	 * in ebitmap_node.
+	 * In addition, note that the following implementation assumes
+	 * unsigned long is no wider than 64 bits.
+	 */
 
 	do {
 		for (c_idx = 0; c_idx < NETLBL_CATMAP_MAPCNT; c_idx++) {
-			if (c_iter->bitmap[c_idx] == 0)
+			unsigned int delta;
+			u64 map = c_iter->bitmap[c_idx];
+
+			if (!map)
 				continue;
 
-			e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
-			if (e_iter == NULL)
-				goto netlbl_import_failure;
-			if (emap_prev == NULL)
-				ebmap->node = e_iter;
-			else
-				emap_prev->next = e_iter;
-			emap_prev = e_iter;
-
-			e_iter->startbit = c_iter->startbit +
-				           NETLBL_CATMAP_MAPSIZE * c_idx;
-			e_iter->map = c_iter->bitmap[c_idx];
+			c_pos = c_iter->startbit
+				+ c_idx * NETLBL_CATMAP_MAPSIZE;
+			if (!e_iter
+			    || c_pos >= e_iter->startbit + EBITMAP_SIZE) {
+				e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
+				if (!e_iter)
+					goto netlbl_import_failure;
+				e_iter->startbit
+					= c_pos - (c_pos % EBITMAP_SIZE);
+				if (emap_prev == NULL)
+					ebmap->node = e_iter;
+				else
+					emap_prev->next = e_iter;
+				emap_prev = e_iter;
+			}
+			delta = c_pos - e_iter->startbit;
+			e_idx = delta / EBITMAP_UNIT_SIZE;
+			e_sft = delta % EBITMAP_UNIT_SIZE;
+			while (map) {
+				e_iter->maps[e_idx++] |= map & (-1UL);
+				map = EBITMAP_SHIFT_UNIT_SIZE(map);
+			}
 		}
 		c_iter = c_iter->next;
 	} while (c_iter != NULL);
 	if (e_iter != NULL)
-		ebmap->highbit = e_iter->startbit + MAPSIZE;
+		ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
 	else
 		ebitmap_destroy(ebmap);
 
@@ -186,6 +214,7 @@
 int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
 {
 	struct ebitmap_node *n1, *n2;
+	int i;
 
 	if (e1->highbit < e2->highbit)
 		return 0;
@@ -197,8 +226,10 @@
 			n1 = n1->next;
 			continue;
 		}
-		if ((n1->map & n2->map) != n2->map)
-			return 0;
+		for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
+				return 0;
+		}
 
 		n1 = n1->next;
 		n2 = n2->next;
@@ -219,12 +250,8 @@
 
 	n = e->node;
 	while (n && (n->startbit <= bit)) {
-		if ((n->startbit + MAPSIZE) > bit) {
-			if (n->map & (MAPBIT << (bit - n->startbit)))
-				return 1;
-			else
-				return 0;
-		}
+		if ((n->startbit + EBITMAP_SIZE) > bit)
+			return ebitmap_node_get_bit(n, bit);
 		n = n->next;
 	}
 
@@ -238,31 +265,35 @@
 	prev = NULL;
 	n = e->node;
 	while (n && n->startbit <= bit) {
-		if ((n->startbit + MAPSIZE) > bit) {
+		if ((n->startbit + EBITMAP_SIZE) > bit) {
 			if (value) {
-				n->map |= (MAPBIT << (bit - n->startbit));
+				ebitmap_node_set_bit(n, bit);
 			} else {
-				n->map &= ~(MAPBIT << (bit - n->startbit));
-				if (!n->map) {
-					/* drop this node from the bitmap */
+				unsigned int s;
 
-					if (!n->next) {
-						/*
-						 * this was the highest map
-						 * within the bitmap
-						 */
-						if (prev)
-							e->highbit = prev->startbit + MAPSIZE;
-						else
-							e->highbit = 0;
-					}
+				ebitmap_node_clr_bit(n, bit);
+
+				s = find_first_bit(n->maps, EBITMAP_SIZE);
+				if (s < EBITMAP_SIZE)
+					return 0;
+
+				/* drop this node from the bitmap */
+				if (!n->next) {
+					/*
+					 * this was the highest map
+					 * within the bitmap
+					 */
 					if (prev)
-						prev->next = n->next;
+						e->highbit = prev->startbit
+							     + EBITMAP_SIZE;
 					else
-						e->node = n->next;
-
-					kfree(n);
+						e->highbit = 0;
 				}
+				if (prev)
+					prev->next = n->next;
+				else
+					e->node = n->next;
+				kfree(n);
 			}
 			return 0;
 		}
@@ -277,12 +308,12 @@
 	if (!new)
 		return -ENOMEM;
 
-	new->startbit = bit & ~(MAPSIZE - 1);
-	new->map = (MAPBIT << (bit - new->startbit));
+	new->startbit = bit - (bit % EBITMAP_SIZE);
+	ebitmap_node_set_bit(new, bit);
 
 	if (!n)
 		/* this node will be the highest map within the bitmap */
-		e->highbit = new->startbit + MAPSIZE;
+		e->highbit = new->startbit + EBITMAP_SIZE;
 
 	if (prev) {
 		new->next = prev->next;
@@ -316,11 +347,11 @@
 
 int ebitmap_read(struct ebitmap *e, void *fp)
 {
-	int rc;
-	struct ebitmap_node *n, *l;
+	struct ebitmap_node *n = NULL;
+	u32 mapunit, count, startbit, index;
+	u64 map;
 	__le32 buf[3];
-	u32 mapsize, count, i;
-	__le64 map;
+	int rc, i;
 
 	ebitmap_init(e);
 
@@ -328,85 +359,88 @@
 	if (rc < 0)
 		goto out;
 
-	mapsize = le32_to_cpu(buf[0]);
+	mapunit = le32_to_cpu(buf[0]);
 	e->highbit = le32_to_cpu(buf[1]);
 	count = le32_to_cpu(buf[2]);
 
-	if (mapsize != MAPSIZE) {
+	if (mapunit != sizeof(u64) * 8) {
 		printk(KERN_ERR "security: ebitmap: map size %u does not "
-		       "match my size %Zd (high bit was %d)\n", mapsize,
-		       MAPSIZE, e->highbit);
+		       "match my size %Zd (high bit was %d)\n",
+		       mapunit, sizeof(u64) * 8, e->highbit);
 		goto bad;
 	}
+
+	/* round up e->highbit */
+	e->highbit += EBITMAP_SIZE - 1;
+	e->highbit -= (e->highbit % EBITMAP_SIZE);
+
 	if (!e->highbit) {
 		e->node = NULL;
 		goto ok;
 	}
-	if (e->highbit & (MAPSIZE - 1)) {
-		printk(KERN_ERR "security: ebitmap: high bit (%d) is not a "
-		       "multiple of the map size (%Zd)\n", e->highbit, MAPSIZE);
-		goto bad;
-	}
-	l = NULL;
+
 	for (i = 0; i < count; i++) {
-		rc = next_entry(buf, fp, sizeof(u32));
+		rc = next_entry(&startbit, fp, sizeof(u32));
 		if (rc < 0) {
 			printk(KERN_ERR "security: ebitmap: truncated map\n");
 			goto bad;
 		}
-		n = kzalloc(sizeof(*n), GFP_KERNEL);
-		if (!n) {
-			printk(KERN_ERR "security: ebitmap: out of memory\n");
-			rc = -ENOMEM;
+		startbit = le32_to_cpu(startbit);
+
+		if (startbit & (mapunit - 1)) {
+			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			       "not a multiple of the map unit size (%u)\n",
+			       startbit, mapunit);
+			goto bad;
+		}
+		if (startbit > e->highbit - mapunit) {
+			printk(KERN_ERR "security: ebitmap start bit (%d) is "
+			       "beyond the end of the bitmap (%u)\n",
+			       startbit, (e->highbit - mapunit));
 			goto bad;
 		}
 
-		n->startbit = le32_to_cpu(buf[0]);
+		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
+			struct ebitmap_node *tmp;
+			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+			if (!tmp) {
+				printk(KERN_ERR
+				       "security: ebitmap: out of memory\n");
+				rc = -ENOMEM;
+				goto bad;
+			}
+			/* round down */
+			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
+			if (n) {
+				n->next = tmp;
+			} else {
+				e->node = tmp;
+			}
+			n = tmp;
+		} else if (startbit <= n->startbit) {
+			printk(KERN_ERR "security: ebitmap: start bit %d"
+			       " comes after start bit %d\n",
+			       startbit, n->startbit);
+			goto bad;
+		}
 
-		if (n->startbit & (MAPSIZE - 1)) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
-			       "not a multiple of the map size (%Zd)\n",
-			       n->startbit, MAPSIZE);
-			goto bad_free;
-		}
-		if (n->startbit > (e->highbit - MAPSIZE)) {
-			printk(KERN_ERR "security: ebitmap start bit (%d) is "
-			       "beyond the end of the bitmap (%Zd)\n",
-			       n->startbit, (e->highbit - MAPSIZE));
-			goto bad_free;
-		}
 		rc = next_entry(&map, fp, sizeof(u64));
 		if (rc < 0) {
 			printk(KERN_ERR "security: ebitmap: truncated map\n");
-			goto bad_free;
+			goto bad;
 		}
-		n->map = le64_to_cpu(map);
+		map = le64_to_cpu(map);
 
-		if (!n->map) {
-			printk(KERN_ERR "security: ebitmap: null map in "
-			       "ebitmap (startbit %d)\n", n->startbit);
-			goto bad_free;
+		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
+		while (map) {
+			n->maps[index++] = map & (-1UL);
+			map = EBITMAP_SHIFT_UNIT_SIZE(map);
 		}
-		if (l) {
-			if (n->startbit <= l->startbit) {
-				printk(KERN_ERR "security: ebitmap: start "
-				       "bit %d comes after start bit %d\n",
-				       n->startbit, l->startbit);
-				goto bad_free;
-			}
-			l->next = n;
-		} else
-			e->node = n;
-
-		l = n;
 	}
-
 ok:
 	rc = 0;
 out:
 	return rc;
-bad_free:
-	kfree(n);
 bad:
 	if (!rc)
 		rc = -EINVAL;
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 1270e34..f283b43 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,14 +16,18 @@
 
 #include <net/netlabel.h>
 
-#define MAPTYPE u64			/* portion of bitmap in each node */
-#define MAPSIZE (sizeof(MAPTYPE) * 8)	/* number of bits in node bitmap */
-#define MAPBIT  1ULL			/* a bit in the node bitmap */
+#define EBITMAP_UNIT_NUMS	((32 - sizeof(void *) - sizeof(u32))	\
+					/ sizeof(unsigned long))
+#define EBITMAP_UNIT_SIZE	BITS_PER_LONG
+#define EBITMAP_SIZE		(EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
+#define EBITMAP_BIT		1ULL
+#define EBITMAP_SHIFT_UNIT_SIZE(x)					\
+	(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
 
 struct ebitmap_node {
-	u32 startbit;		/* starting position in the total bitmap */
-	MAPTYPE map;		/* this node's portion of the bitmap */
 	struct ebitmap_node *next;
+	unsigned long maps[EBITMAP_UNIT_NUMS];
+	u32 startbit;
 };
 
 struct ebitmap {
@@ -34,11 +38,17 @@
 #define ebitmap_length(e) ((e)->highbit)
 #define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0)
 
-static inline unsigned int ebitmap_start(struct ebitmap *e,
-					 struct ebitmap_node **n)
+static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
+						  struct ebitmap_node **n)
 {
-	*n = e->node;
-	return ebitmap_startbit(e);
+	unsigned int ofs;
+
+	for (*n = e->node; *n; *n = (*n)->next) {
+		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+		if (ofs < EBITMAP_SIZE)
+			return (*n)->startbit + ofs;
+	}
+	return ebitmap_length(e);
 }
 
 static inline void ebitmap_init(struct ebitmap *e)
@@ -46,28 +56,65 @@
 	memset(e, 0, sizeof(*e));
 }
 
-static inline unsigned int ebitmap_next(struct ebitmap_node **n,
-					unsigned int bit)
+static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
+						 struct ebitmap_node **n,
+						 unsigned int bit)
 {
-	if ((bit == ((*n)->startbit + MAPSIZE - 1)) &&
-	    (*n)->next) {
-		*n = (*n)->next;
-		return (*n)->startbit;
-	}
+	unsigned int ofs;
 
-	return (bit+1);
+	ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
+	if (ofs < EBITMAP_SIZE)
+		return ofs + (*n)->startbit;
+
+	for (*n = (*n)->next; *n; *n = (*n)->next) {
+		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
+		if (ofs < EBITMAP_SIZE)
+			return ofs + (*n)->startbit;
+	}
+	return ebitmap_length(e);
 }
 
-static inline int ebitmap_node_get_bit(struct ebitmap_node * n,
+#define EBITMAP_NODE_INDEX(node, bit)	\
+	(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
+#define EBITMAP_NODE_OFFSET(node, bit)	\
+	(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
+
+static inline int ebitmap_node_get_bit(struct ebitmap_node *n,
 				       unsigned int bit)
 {
-	if (n->map & (MAPBIT << (bit - n->startbit)))
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	if ((n->maps[index] & (EBITMAP_BIT << ofs)))
 		return 1;
 	return 0;
 }
 
-#define ebitmap_for_each_bit(e, n, bit) \
-	for (bit = ebitmap_start(e, &n); bit < ebitmap_length(e); bit = ebitmap_next(&n, bit)) \
+static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
+					unsigned int bit)
+{
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	n->maps[index] |= (EBITMAP_BIT << ofs);
+}
+
+static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
+					unsigned int bit)
+{
+	unsigned int index = EBITMAP_NODE_INDEX(n, bit);
+	unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
+
+	BUG_ON(index >= EBITMAP_UNIT_NUMS);
+	n->maps[index] &= ~(EBITMAP_BIT << ofs);
+}
+
+#define ebitmap_for_each_positive_bit(e, n, bit)	\
+	for (bit = ebitmap_start_positive(e, &n);	\
+	     bit < ebitmap_length(e);			\
+	     bit = ebitmap_next_positive(e, &n, bit))	\
 
 int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
 int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
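
A small sketch (not from the patch) of how callers use the new iterator; unlike the old ebitmap_for_each_bit(), only set bits are visited, so the per-bit ebitmap_node_get_bit() test disappears.

static void example_dump_bits(struct ebitmap *e)
{
	struct ebitmap_node *n;
	unsigned int bit;

	ebitmap_for_each_positive_bit(e, n, bit)
		/* 'bit' is guaranteed to be set when the body runs */
		printk(KERN_DEBUG "bit %u is set\n", bit);
}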
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 4a8bab2..9a11dea 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -34,7 +34,9 @@
  */
 int mls_compute_context_len(struct context * context)
 {
-	int i, l, len, range;
+	int i, l, len, head, prev;
+	char *nm;
+	struct ebitmap *e;
 	struct ebitmap_node *node;
 
 	if (!selinux_mls_enabled)
@@ -42,31 +44,33 @@
 
 	len = 1; /* for the beginning ":" */
 	for (l = 0; l < 2; l++) {
-		range = 0;
-		len += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
+		int index_sens = context->range.level[l].sens;
+		len += strlen(policydb.p_sens_val_to_name[index_sens - 1]);
 
-		ebitmap_for_each_bit(&context->range.level[l].cat, node, i) {
-			if (ebitmap_node_get_bit(node, i)) {
-				if (range) {
-					range++;
-					continue;
+		/* categories */
+		head = -2;
+		prev = -2;
+		e = &context->range.level[l].cat;
+		ebitmap_for_each_positive_bit(e, node, i) {
+			if (i - prev > 1) {
+				/* one or more negative bits are skipped */
+				if (head != prev) {
+					nm = policydb.p_cat_val_to_name[prev];
+					len += strlen(nm) + 1;
 				}
-
-				len += strlen(policydb.p_cat_val_to_name[i]) + 1;
-				range++;
-			} else {
-				if (range > 1)
-					len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1;
-				range = 0;
+				nm = policydb.p_cat_val_to_name[i];
+				len += strlen(nm) + 1;
+				head = i;
 			}
+			prev = i;
 		}
-		/* Handle case where last category is the end of range */
-		if (range > 1)
-			len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1;
-
+		if (prev != head) {
+			nm = policydb.p_cat_val_to_name[prev];
+			len += strlen(nm) + 1;
+		}
 		if (l == 0) {
 			if (mls_level_eq(&context->range.level[0],
-			                 &context->range.level[1]))
+					 &context->range.level[1]))
 				break;
 			else
 				len++;
@@ -84,8 +88,9 @@
 void mls_sid_to_context(struct context *context,
                         char **scontext)
 {
-	char *scontextp;
-	int i, l, range, wrote_sep;
+	char *scontextp, *nm;
+	int i, l, head, prev;
+	struct ebitmap *e;
 	struct ebitmap_node *node;
 
 	if (!selinux_mls_enabled)
@@ -97,61 +102,54 @@
 	scontextp++;
 
 	for (l = 0; l < 2; l++) {
-		range = 0;
-		wrote_sep = 0;
 		strcpy(scontextp,
 		       policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
-		scontextp += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
+		scontextp += strlen(scontextp);
 
 		/* categories */
-		ebitmap_for_each_bit(&context->range.level[l].cat, node, i) {
-			if (ebitmap_node_get_bit(node, i)) {
-				if (range) {
-					range++;
-					continue;
-				}
-
-				if (!wrote_sep) {
-					*scontextp++ = ':';
-					wrote_sep = 1;
-				} else
-					*scontextp++ = ',';
-				strcpy(scontextp, policydb.p_cat_val_to_name[i]);
-				scontextp += strlen(policydb.p_cat_val_to_name[i]);
-				range++;
-			} else {
-				if (range > 1) {
-					if (range > 2)
+		head = -2;
+		prev = -2;
+		e = &context->range.level[l].cat;
+		ebitmap_for_each_positive_bit(e, node, i) {
+			if (i - prev > 1) {
+				/* one or more negative bits are skipped */
+				if (prev != head) {
+					if (prev - head > 1)
 						*scontextp++ = '.';
 					else
 						*scontextp++ = ',';
-
-					strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]);
-					scontextp += strlen(policydb.p_cat_val_to_name[i - 1]);
+					nm = policydb.p_cat_val_to_name[prev];
+					strcpy(scontextp, nm);
+					scontextp += strlen(nm);
 				}
-				range = 0;
+				if (prev < 0)
+					*scontextp++ = ':';
+				else
+					*scontextp++ = ',';
+				nm = policydb.p_cat_val_to_name[i];
+				strcpy(scontextp, nm);
+				scontextp += strlen(nm);
+				head = i;
 			}
+			prev = i;
 		}
 
-		/* Handle case where last category is the end of range */
-		if (range > 1) {
-			if (range > 2)
+		if (prev != head) {
+			if (prev - head > 1)
 				*scontextp++ = '.';
 			else
 				*scontextp++ = ',';
-
-			strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]);
-			scontextp += strlen(policydb.p_cat_val_to_name[i - 1]);
+			nm = policydb.p_cat_val_to_name[prev];
+			strcpy(scontextp, nm);
+			scontextp += strlen(nm);
 		}
 
 		if (l == 0) {
 			if (mls_level_eq(&context->range.level[0],
 			                 &context->range.level[1]))
 				break;
-			else {
-				*scontextp = '-';
-				scontextp++;
-			}
+			else
+				*scontextp++ = '-';
 		}
 	}
 
@@ -190,17 +188,15 @@
 		if (!levdatum)
 			return 0;
 
-		ebitmap_for_each_bit(&c->range.level[l].cat, node, i) {
-			if (ebitmap_node_get_bit(node, i)) {
-				if (i > p->p_cats.nprim)
-					return 0;
-				if (!ebitmap_get_bit(&levdatum->level->cat, i))
-					/*
-					 * Category may not be associated with
-					 * sensitivity in low level.
-					 */
-					return 0;
-			}
+		ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
+			if (i > p->p_cats.nprim)
+				return 0;
+			if (!ebitmap_get_bit(&levdatum->level->cat, i))
+				/*
+				 * Category may not be associated with
+				 * sensitivity in low level.
+				 */
+				return 0;
 		}
 	}
 
@@ -485,18 +481,16 @@
 		c->range.level[l].sens = levdatum->level->sens;
 
 		ebitmap_init(&bitmap);
-		ebitmap_for_each_bit(&c->range.level[l].cat, node, i) {
-			if (ebitmap_node_get_bit(node, i)) {
-				int rc;
+		ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
+			int rc;
 
-				catdatum = hashtab_search(newp->p_cats.table,
-				         	oldp->p_cat_val_to_name[i]);
-				if (!catdatum)
-					return -EINVAL;
-				rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
-				if (rc)
-					return rc;
-			}
+			catdatum = hashtab_search(newp->p_cats.table,
+						  oldp->p_cat_val_to_name[i]);
+			if (!catdatum)
+				return -EINVAL;
+			rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
+			if (rc)
+				return rc;
 		}
 		ebitmap_destroy(&c->range.level[l].cat);
 		c->range.level[l].cat = bitmap;
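
A worked example of the new head/prev range compression in mls_compute_context_len() and mls_sid_to_context(): with the usual cN category naming, a level whose category bitmap has c0, c1, c2, c3 and c5 set is rendered as ":c0.c3,c5" — contiguous runs collapse to "first.last", isolated categories are comma-separated, and a run of exactly two (say c0 and c1 only) prints as ":c0,c1".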
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f05f97a..539828b 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -177,18 +177,15 @@
 
 	rc = roles_init(p);
 	if (rc)
-		goto out_free_avtab;
+		goto out_free_symtab;
 
 	rc = cond_policydb_init(p);
 	if (rc)
-		goto out_free_avtab;
+		goto out_free_symtab;
 
 out:
 	return rc;
 
-out_free_avtab:
-	avtab_destroy(&p->te_avtab);
-
 out_free_symtab:
 	for (i = 0; i < SYM_NUM; i++)
 		hashtab_destroy(p->symtab[i].table);
@@ -677,6 +674,8 @@
 	}
 	kfree(p->type_attr_map);
 
+	kfree(p->undefined_perms);
+
 	return;
 }
 
@@ -1530,6 +1529,8 @@
 			goto bad;
 		}
 	}
+	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
+	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
 
 	info = policydb_lookup_compat(p->policyvers);
 	if (!info) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 8319d5f..844d310 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -242,6 +242,10 @@
 	struct ebitmap *type_attr_map;
 
 	unsigned int policyvers;
+
+	unsigned int reject_unknown : 1;
+	unsigned int allow_unknown : 1;
+	u32 *undefined_perms;
 };
 
 extern void policydb_destroy(struct policydb *p);
@@ -253,6 +257,10 @@
 
 #define POLICYDB_CONFIG_MLS    1
 
+/* the config flags related to unknown classes/perms are bits 2 and 3 */
+#define REJECT_UNKNOWN	0x00000002
+#define ALLOW_UNKNOWN	0x00000004
+
 #define OBJECT_R "object_r"
 #define OBJECT_R_VAL 1
 
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 6100fc0..d572dc9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -292,6 +292,7 @@
 	struct class_datum *tclass_datum;
 	struct ebitmap *sattr, *tattr;
 	struct ebitmap_node *snode, *tnode;
+	const struct selinux_class_perm *kdefs = &selinux_class_perm;
 	unsigned int i, j;
 
 	/*
@@ -305,13 +306,6 @@
 		    tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
 			tclass = SECCLASS_NETLINK_SOCKET;
 
-	if (!tclass || tclass > policydb.p_classes.nprim) {
-		printk(KERN_ERR "security_compute_av:  unrecognized class %d\n",
-		       tclass);
-		return -EINVAL;
-	}
-	tclass_datum = policydb.class_val_to_struct[tclass - 1];
-
 	/*
 	 * Initialize the access vectors to the default values.
 	 */
@@ -322,6 +316,36 @@
 	avd->seqno = latest_granting;
 
 	/*
+	 * Check for all the invalid cases.
+	 * - tclass 0
+	 * - tclass > policy and > kernel
+	 * - tclass > policy but is a userspace class
+	 * - tclass > policy but we do not allow unknowns
+	 */
+	if (unlikely(!tclass))
+		goto inval_class;
+	if (unlikely(tclass > policydb.p_classes.nprim))
+		if (tclass > kdefs->cts_len ||
+		    !kdefs->class_to_string[tclass - 1] ||
+		    !policydb.allow_unknown)
+			goto inval_class;
+
+	/*
+	 * Kernel class and we allow unknowns, so pad the allow decision;
+	 * the pad will be all 1s for unknown classes.
+	 */
+	if (tclass <= kdefs->cts_len && policydb.allow_unknown)
+		avd->allowed = policydb.undefined_perms[tclass - 1];
+
+	/*
+	 * Not in policy.  The decision is already complete (all 1 or all 0), so return.
+	 */
+	if (unlikely(tclass > policydb.p_classes.nprim))
+		return 0;
+
+	tclass_datum = policydb.class_val_to_struct[tclass - 1];
+
+	/*
 	 * If a specific type enforcement rule was defined for
 	 * this permission check, then use it.
 	 */
@@ -329,12 +353,8 @@
 	avkey.specified = AVTAB_AV;
 	sattr = &policydb.type_attr_map[scontext->type - 1];
 	tattr = &policydb.type_attr_map[tcontext->type - 1];
-	ebitmap_for_each_bit(sattr, snode, i) {
-		if (!ebitmap_node_get_bit(snode, i))
-			continue;
-		ebitmap_for_each_bit(tattr, tnode, j) {
-			if (!ebitmap_node_get_bit(tnode, j))
-				continue;
+	ebitmap_for_each_positive_bit(sattr, snode, i) {
+		ebitmap_for_each_positive_bit(tattr, tnode, j) {
 			avkey.source_type = i + 1;
 			avkey.target_type = j + 1;
 			for (node = avtab_search_node(&policydb.te_avtab, &avkey);
@@ -387,6 +407,10 @@
 	}
 
 	return 0;
+
+inval_class:
+	printk(KERN_ERR "%s:  unrecognized class %d\n", __FUNCTION__, tclass);
+	return -EINVAL;
 }
 
 static int security_validtrans_handle_fail(struct context *ocontext,
@@ -1054,6 +1078,13 @@
 	const char *def_class, *def_perm, *pol_class;
 	struct symtab *perms;
 
+	if (p->allow_unknown) {
+		u32 num_classes = kdefs->cts_len;
+		p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL);
+		if (!p->undefined_perms)
+			return -ENOMEM;
+	}
+
 	for (i = 1; i < kdefs->cts_len; i++) {
 		def_class = kdefs->class_to_string[i];
 		if (!def_class)
@@ -1062,6 +1093,10 @@
 			printk(KERN_INFO
 			       "security:  class %s not defined in policy\n",
 			       def_class);
+			if (p->reject_unknown)
+				return -EINVAL;
+			if (p->allow_unknown)
+				p->undefined_perms[i-1] = ~0U;
 			continue;
 		}
 		pol_class = p->p_class_val_to_name[i-1];
@@ -1087,12 +1122,16 @@
 			printk(KERN_INFO
 			       "security:  permission %s in class %s not defined in policy\n",
 			       def_perm, pol_class);
+			if (p->reject_unknown)
+				return -EINVAL;
+			if (p->allow_unknown)
+				p->undefined_perms[class_val-1] |= perm_val;
 			continue;
 		}
 		perdatum = hashtab_search(perms->table, def_perm);
 		if (perdatum == NULL) {
 			printk(KERN_ERR
-			       "security:  permission %s in class %s not found in policy\n",
+			       "security:  permission %s in class %s not found in policy, bad policy\n",
 			       def_perm, pol_class);
 			return -EINVAL;
 		}
@@ -1130,12 +1169,16 @@
 				printk(KERN_INFO
 				       "security:  permission %s in class %s not defined in policy\n",
 				       def_perm, pol_class);
+				if (p->reject_unknown)
+					return -EINVAL;
+				if (p->allow_unknown)
+					p->undefined_perms[class_val-1] |= (1 << j);
 				continue;
 			}
 			perdatum = hashtab_search(perms->table, def_perm);
 			if (perdatum == NULL) {
 				printk(KERN_ERR
-				       "security:  permission %s in class %s not found in policy\n",
+				       "security:  permission %s in class %s not found in policy, bad policy\n",
 				       def_perm, pol_class);
 				return -EINVAL;
 			}
@@ -1621,14 +1664,10 @@
 		goto out_unlock;
 	}
 
-	ebitmap_for_each_bit(&user->roles, rnode, i) {
-		if (!ebitmap_node_get_bit(rnode, i))
-			continue;
+	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
 		role = policydb.role_val_to_struct[i];
 		usercon.role = i+1;
-		ebitmap_for_each_bit(&role->types, tnode, j) {
-			if (!ebitmap_node_get_bit(tnode, j))
-				continue;
+		ebitmap_for_each_positive_bit(&role->types, tnode, j) {
 			usercon.type = j+1;
 
 			if (mls_setup_user_range(fromcon, user, &usercon))
@@ -2102,6 +2141,16 @@
 	return rc;
 }
 
+int security_get_reject_unknown(void)
+{
+	return policydb.reject_unknown;
+}
+
+int security_get_allow_unknown(void)
+{
+	return policydb.allow_unknown;
+}
+
 struct selinux_audit_rule {
 	u32 au_seqno;
 	struct context au_ctxt;