Pull bugzilla-3241 into release branch
diff --git a/Makefile b/Makefile
index e9560c6f..4dcf25d 100644
--- a/Makefile
+++ b/Makefile
@@ -41,8 +41,9 @@
   KBUILD_VERBOSE = 0
 endif
 
-# Call sparse as part of compilation of C files
-# Use 'make C=1' to enable sparse checking
+# Call checker as part of compilation of C files
+# Use 'make C=1' to enable checking (sparse, by default)
+# Override with 'make C=1 CHECK=checker_executable CHECKFLAGS=....'
 
 ifdef C
   ifeq ("$(origin C)", "command line")
@@ -1060,8 +1061,8 @@
 
 	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse)'
-	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK (sparse)'
+	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
 	@echo  ''
 	@echo  'Execute "make" or "make all" to build all targets marked with [*] '
 	@echo  'For further info see the ./README file'
@@ -1352,7 +1353,7 @@
 
 a_flags = -Wp,-MD,$(depfile) $(AFLAGS) $(AFLAGS_KERNEL) \
 	  $(NOSTDINC_FLAGS) $(CPPFLAGS) \
-	  $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+	  $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
 
 quiet_cmd_as_o_S = AS      $@
 cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 5e70c2f..cbc1184 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
 obj-$(CONFIG_K8_NB)		+= k8.o
+obj-$(CONFIG_AUDIT)		+= audit.o
 
 EXTRA_AFLAGS   := -traditional
 
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 50eb0e0..7b421b3 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -168,6 +168,8 @@
 	}
 }
 
+#ifdef CONFIG_SMP
+
 static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
 {
 	struct alt_instr *a;
@@ -328,6 +330,8 @@
 	spin_unlock_irqrestore(&smp_alt, flags);
 }
 
+#endif
+
 void __init alternative_instructions(void)
 {
 	if (no_replacement) {
@@ -349,6 +353,7 @@
 	smp_alt_once = 1;
 #endif
 
+#ifdef CONFIG_SMP
 	if (smp_alt_once) {
 		if (1 == num_possible_cpus()) {
 			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
@@ -370,4 +375,5 @@
 					    _text, _etext);
 		alternatives_smp_switch(0);
 	}
+#endif
 }
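
The CONFIG_SMP guards added above follow the usual kernel pattern: helpers that are only called on SMP builds are compiled out entirely on UP, so a UP kernel neither carries the dead code nor warns about unused statics. A minimal standalone sketch of the idiom (the names and the hard-coded config value are illustrative only, not from the patch):

#include <stdio.h>

#define CONFIG_SMP 1	/* stands in for the .config result */

#ifdef CONFIG_SMP
/* Compiled only when SMP is configured; a UP build would otherwise
 * warn that this static function is defined but never used. */
static void smp_only_helper(void)
{
	printf("patching SMP alternatives\n");
}
#endif

int main(void)
{
#ifdef CONFIG_SMP
	smp_only_helper();
#endif
	return 0;
}
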
diff --git a/arch/i386/kernel/audit.c b/arch/i386/kernel/audit.c
new file mode 100644
index 0000000..5a53c6f
--- /dev/null
+++ b/arch/i386/kernel/audit.c
@@ -0,0 +1,23 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static int __init audit_classes_init(void)
+{
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	return 0;
+}
+
+__initcall(audit_classes_init);
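
The new audit class tables lean on an include trick: the asm-generic headers are assumed to expand to bare, comma-terminated lists of syscall numbers, and the including file appends the ~0U terminator itself. A self-contained sketch of the idiom (the macro and the syscall numbers below are illustrative stand-ins, not the real header contents):

#include <stdio.h>

/* stand-in for #include <asm-generic/audit_dir_write.h>; by assumption
 * it expands to a comma-terminated list of syscall numbers */
#define AUDIT_DIR_WRITE_LIST 38 /* rename */, 39 /* mkdir */, 40 /* rmdir */,

static unsigned dir_class[] = {
	AUDIT_DIR_WRITE_LIST
	~0U	/* sentinel marking the end of the class */
};

int main(void)
{
	for (unsigned *p = dir_class; *p != ~0U; p++)
		printf("audited syscall %u\n", *p);
	return 0;
}
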
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index dc5d897..89e8486 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -725,16 +725,15 @@
 
 #ifdef CONFIG_DEBUG_RODATA
 
-extern char __start_rodata, __end_rodata;
 void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)&__start_rodata;
+	unsigned long addr = (unsigned long)__start_rodata;
 
-	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
 		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 
-	printk ("Write protecting the kernel read-only data: %luk\n",
-			(unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+	printk("Write protecting the kernel read-only data: %uk\n",
+			(__end_rodata - __start_rodata) >> 10);
 
 	/*
 	 * change_page_attr() requires a global_flush_tlb() call after it.
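
This hunk works because <asm/sections.h> declares the section markers as arrays (extern char __start_rodata[]) rather than single chars, so the symbol name itself already denotes the address. A quick sketch of the difference between the two declaration styles (both markers are defined locally here so the example actually links):

#include <stdio.h>

char scalar_marker;	/* mimics: extern char __start_rodata;   -> used as &scalar_marker */
char array_marker[1];	/* mimics: extern char __start_rodata[]; -> used as array_marker   */

int main(void)
{
	/* an array name decays to a pointer, so no '&' is needed */
	printf("%p %p\n", (void *)&scalar_marker, (void *)array_marker);
	return 0;
}
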
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
index 61cb60a..baad8c7 100644
--- a/arch/ia64/ia32/Makefile
+++ b/arch/ia64/ia32/Makefile
@@ -4,6 +4,7 @@
 
 obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
 	 ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
+obj-$(CONFIG_AUDIT) += audit.o
 
 # Don't let GCC use f16-f31, so that save_ia32_fpstate_live() and
 # restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c
new file mode 100644
index 0000000..ab94f2e
--- /dev/null
+++ b/arch/ia64/ia32/audit.c
@@ -0,0 +1,11 @@
+#include <asm-i386/unistd.h>
+
+unsigned ia32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned ia32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 09a0dbc..0e4553f 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
+obj-$(CONFIG_AUDIT)		+= audit.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 
 # The gate DSO image is built using a special linker script.
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
new file mode 100644
index 0000000..f251293
--- /dev/null
+++ b/arch/ia64/kernel/audit.c
@@ -0,0 +1,29 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_IA32_SUPPORT
+	extern __u32 ia32_dir_class[];
+	extern __u32 ia32_chattr_class[];
+	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
+#endif
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b64602a..f2b96f1 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -27,6 +27,7 @@
 #include <asm/tlb.h>
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -417,11 +418,10 @@
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void)
 {
-	extern char __start_rodata, __end_rodata;
 	/* rodata memory was already mapped with KERNEL_RO access rights by
            pagetable_init() and map_pages(). No need to do additional stuff here */
 	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-		(unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+		(unsigned long)(__end_rodata - __start_rodata) >> 10);
 }
 #endif
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1a434a7..d8948c3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -228,8 +228,9 @@
 sysc_nr_ok:
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
+	l	%r8,BASED(.Lsysc_table)
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        l       %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+	l	%r8,0(%r7,%r8)	  # get system call addr.
         bnz     BASED(sysc_tracesys)
         basr    %r14,%r8          # call sys_xxxx
         st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -330,9 +331,10 @@
 	basr	%r14,%r1
 	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
+	l	%r8,BASED(.Lsysc_table)
 	l	%r7,SP_R2(%r15)        # strace might have changed the 
 	sll	%r7,2                  #  system call
-	l	%r8,sys_call_table-system_call(%r7,%r13)
+	l	%r8,0(%r7,%r8)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@
 .Ltrace:       .long  syscall_trace
 .Lvfork:       .long  sys_vfork
 .Lschedtail:   .long  schedule_tail
+.Lsysc_table:  .long  sys_call_table
 
 .Lcritical_start:
                .long  __critical_start + 0x80000000
@@ -1017,8 +1020,8 @@
 .Lcleanup_critical:
                .long  cleanup_critical
 
+	       .section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
-
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index edad607..1ca499f 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -991,6 +991,7 @@
 .Lcritical_end:
                .quad  __critical_end
 
+	       .section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
 sys_call_table:
 #include "syscalls.S"
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81dce185..eb6ebfe 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -89,17 +91,6 @@
         printk("%d pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@
         unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	/* unmap whole virtual address space */
 	
@@ -143,7 +138,10 @@
                 pg_dir++;
 
                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-                        pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
                         if (pfn >= max_low_pfn)
                                 pte_clear(&init_mm, 0, &pte);
                         set_pte(pg_table, pte);
@@ -175,6 +173,7 @@
 }
 
 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
         pgd_t * pg_dir;
@@ -186,13 +185,15 @@
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
           _KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-
 	unsigned long zones_size[MAX_NR_ZONES];
 	unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;
 
 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@
                         pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
 	
                         for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-                                pte = pfn_pte(pfn, PAGE_KERNEL);
+				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+				else
+					pte = pfn_pte(pfn, PAGE_KERNEL);
                                 if (pfn >= max_low_pfn) {
                                         pte_clear(&init_mm, 0, &pte); 
                                         continue;
@@ -282,6 +286,9 @@
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >>10,
                 initsize >> 10);
+	printk("Write protected kernel read-only data: %#lx - %#lx\n",
+	       (unsigned long)&__start_rodata,
+	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }
 
 void free_initmem(void)
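
ro_start_pfn/ro_end_pfn bracket the rodata byte range outward to whole page frames: PFN_DOWN rounds the start address down and PFN_UP rounds the end address up, so every rodata byte falls inside [ro_start_pfn, ro_end_pfn). A compact demonstration using the same rounding macros <linux/pfn.h> provides (the addresses are made up):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x100234, end = 0x105678;	/* illustrative */

	/* covers pfns 0x100 through 0x105 inclusive */
	printf("ro pfns: %#lx .. %#lx\n", PFN_DOWN(start), PFN_UP(end) - 1);
	return 0;
}
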
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 5992c32..8912cec 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -8,6 +8,7 @@
 #include "linux/kernel.h"
 #include "linux/string.h"
 #include "linux/fs.h"
+#include "linux/hardirq.h"
 #include "linux/highmem.h"
 #include "asm/page.h"
 #include "asm/pgtable.h"
@@ -38,7 +39,7 @@
 	return((unsigned long) phys);
 }
 
-static int do_op(unsigned long addr, int len, int is_write,
+static int do_op_one_page(unsigned long addr, int len, int is_write,
 		 int (*op)(unsigned long addr, int len, void *arg), void *arg)
 {
 	struct page *page;
@@ -49,9 +50,11 @@
 		return(-1);
 
 	page = phys_to_page(addr);
-	addr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
+	addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK);
+
 	n = (*op)(addr, len, arg);
-	kunmap(page);
+
+	kunmap_atomic(page, KM_UML_USERCOPY);
 
 	return(n);
 }
@@ -77,7 +80,7 @@
 	remain = len;
 
 	current->thread.fault_catcher = jmpbuf;
-	n = do_op(addr, size, is_write, op, arg);
+	n = do_op_one_page(addr, size, is_write, op, arg);
 	if(n != 0){
 		*res = (n < 0 ? remain : 0);
 		goto out;
@@ -91,7 +94,7 @@
 	}
 
 	while(addr < ((addr + remain) & PAGE_MASK)){
-		n = do_op(addr, PAGE_SIZE, is_write, op, arg);
+		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
 		if(n != 0){
 			*res = (n < 0 ? remain : 0);
 			goto out;
@@ -105,7 +108,7 @@
 		goto out;
 	}
 
-	n = do_op(addr, remain, is_write, op, arg);
+	n = do_op_one_page(addr, remain, is_write, op, arg);
 	if(n != 0)
 		*res = (n < 0 ? remain : 0);
 	else *res = 0;
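
Switching do_op_one_page() from kmap() to kmap_atomic() matters because kmap() can sleep, which is unsafe if the copy runs in atomic context (note the new linux/hardirq.h include); kmap_atomic() maps through a per-CPU slot instead, at the cost that nothing between the map and unmap may sleep. A kernel-style sketch of the pairing against the 2.6-era highmem API, using the KM_UML_USERCOPY slot the patch relies on (a sketch under those assumptions, not the patch's exact code):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy out of a highmem page through a temporary per-CPU mapping.
 * kmap_atomic() disables preemption, so no blocking is allowed
 * until the matching kunmap_atomic(). */
static void copy_from_page(struct page *page, void *dst,
			   unsigned int offset, unsigned int len)
{
	char *vaddr = kmap_atomic(page, KM_UML_USERCOPY);

	memcpy(dst, vaddr + offset, len);
	kunmap_atomic(vaddr, KM_UML_USERCOPY);
}
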
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index 362db05..48092b9 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -67,32 +67,53 @@
 	return err;
 }
 
-static int actually_do_remove(char *dir)
+/*
+ * Unlinks the files contained in @dir and then removes @dir.
+ * Doesn't recurse into directory trees, so it's not quite rm -rf, but close.
+ * ENOENT errors are ignored for everything (they do happen, possibly due to
+ * races between multiple dying UML threads).
+ */
+static int remove_files_and_dir(char *dir)
 {
 	DIR *directory;
 	struct dirent *ent;
 	int len;
 	char file[256];
+	int ret;
 
 	directory = opendir(dir);
-	if(directory == NULL)
-		return -errno;
+	if (directory == NULL) {
+		if (errno != ENOENT)
+			return -errno;
+		else
+			return 0;
+	}
 
-	while((ent = readdir(directory)) != NULL){
-		if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
+	while ((ent = readdir(directory)) != NULL) {
+		if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
 			continue;
 		len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
-		if(len > sizeof(file))
-			return -E2BIG;
+		if (len > sizeof(file)) {
+			ret = -E2BIG;
+			goto out;
+		}
 
 		sprintf(file, "%s/%s", dir, ent->d_name);
-		if(unlink(file) < 0)
-			return -errno;
+		if (unlink(file) < 0 && errno != ENOENT) {
+			ret = -errno;
+			goto out;
+		}
 	}
-	if(rmdir(dir) < 0)
-		return -errno;
 
-	return 0;
+	if (rmdir(dir) < 0 && errno != ENOENT) {
+		ret = -errno;
+		goto out;
+	}
+
+	ret = 0;
+out:
+	closedir(directory);
+	return ret;
 }
 
 /* This says that there isn't already a user of the specified directory even if
@@ -103,9 +124,10 @@
  * 	something other than UML sticking stuff in the directory
  *	this boot racing with a shutdown of the other UML
  * In any of these cases, the directory isn't useful for anything else.
+ *
+ * Boolean return: 1 if in use, 0 otherwise.
  */
-
-static int not_dead_yet(char *dir)
+static inline int is_umdir_used(char *dir)
 {
 	char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
 	char pid[sizeof("nnnnn\0")], *end;
@@ -113,7 +135,7 @@
 
 	n = snprintf(file, sizeof(file), "%s/pid", dir);
 	if(n >= sizeof(file)){
-		printk("not_dead_yet - pid filename too long\n");
+		printk("is_umdir_used - pid filename too long\n");
 		err = -E2BIG;
 		goto out;
 	}
@@ -123,7 +145,7 @@
 	if(fd < 0) {
 		fd = -errno;
 		if(fd != -ENOENT){
-			printk("not_dead_yet : couldn't open pid file '%s', "
+			printk("is_umdir_used : couldn't open pid file '%s', "
 			       "err = %d\n", file, -fd);
 		}
 		goto out;
@@ -132,18 +154,18 @@
 	err = 0;
 	n = read(fd, pid, sizeof(pid));
 	if(n < 0){
-		printk("not_dead_yet : couldn't read pid file '%s', "
+		printk("is_umdir_used : couldn't read pid file '%s', "
 		       "err = %d\n", file, errno);
 		goto out_close;
 	} else if(n == 0){
-		printk("not_dead_yet : couldn't read pid file '%s', "
+		printk("is_umdir_used : couldn't read pid file '%s', "
 		       "0-byte read\n", file);
 		goto out_close;
 	}
 
 	p = strtoul(pid, &end, 0);
 	if(end == pid){
-		printk("not_dead_yet : couldn't parse pid file '%s', "
+		printk("is_umdir_used : couldn't parse pid file '%s', "
 		       "errno = %d\n", file, errno);
 		goto out_close;
 	}
@@ -153,19 +175,32 @@
 		return 1;
 	}
 
-	err = actually_do_remove(dir);
-	if(err)
-		printk("not_dead_yet - actually_do_remove failed with "
-		       "err = %d\n", err);
-
-	return err;
-
 out_close:
 	close(fd);
 out:
 	return 0;
 }
 
+/*
+ * Try to remove the directory @dir unless it's in use.
+ * Precondition: @dir exists.
+ * Returns 0 for success, < 0 for failure in removal or if the directory is in
+ * use.
+ */
+static int umdir_take_if_dead(char *dir)
+{
+	int ret;
+	if (is_umdir_used(dir))
+		return -EEXIST;
+
+	ret = remove_files_and_dir(dir);
+	if (ret) {
+		printk("is_umdir_used - remove_files_and_dir failed with "
+		       "err = %d\n", ret);
+	}
+	return ret;
+}
+
 static void __init create_pid_file(void)
 {
 	char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
@@ -244,11 +279,7 @@
 		if(err != -EEXIST)
 			goto err;
 
-		/* 1   -> this umid is already in use
-		 * < 0 -> we couldn't remove the umid directory
-		 * In either case, we can't use this umid, so return -EEXIST.
-		 */
-		if(not_dead_yet(tmp) != 0)
+		if (umdir_take_if_dead(tmp) < 0)
 			goto err;
 
 		err = mkdir(tmp, 0777);
@@ -344,9 +375,9 @@
 	char dir[strlen(uml_dir) + UMID_LEN + 1], err;
 
 	sprintf(dir, "%s%s", uml_dir, umid);
-	err = actually_do_remove(dir);
+	err = remove_files_and_dir(dir);
 	if(err)
-		printf("remove_umid_dir - actually_do_remove failed with "
+		printf("remove_umid_dir - remove_files_and_dir failed with "
 		       "err = %d\n", err);
 }
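
The cleanup helpers above treat ENOENT as success throughout, since several dying UML instances can race to remove the same umid directory. This is host-side (userspace) code, so the pattern is plain libc; a standalone sketch of the tolerant-unlink idiom (the path is illustrative):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Unlink a file, but treat "already gone" as success: a competing
 * process may have removed it first, which is fine. */
static int unlink_tolerant(const char *path)
{
	if (unlink(path) < 0 && errno != ENOENT)
		return -errno;
	return 0;
}

int main(void)
{
	int err = unlink_tolerant("/tmp/umid-example/pid");	/* illustrative */

	if (err)
		fprintf(stderr, "unlink failed: %d\n", err);
	return err != 0;
}
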
 
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index 1347dc6..813077f 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -8,7 +8,7 @@
 USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
 
 $(USER_OBJS:.o=.%): \
-	c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(*F).o)
+	c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(basetarget).o)
 $(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
 	-Dunix -D__unix__ -D__$(SUBARCH)__
 
@@ -17,7 +17,7 @@
 UNPROFILE_OBJS := $(foreach file,$(UNPROFILE_OBJS),$(obj)/$(file))
 
 $(UNPROFILE_OBJS:.o=.%): \
-	c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(*F).o)
+	c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(basetarget).o)
 $(UNPROFILE_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
 	-Dunix -D__unix__ -D__$(SUBARCH)__
 
diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
index e9263b4..62bc5f5 100644
--- a/arch/x86_64/ia32/Makefile
+++ b/arch/x86_64/ia32/Makefile
@@ -11,6 +11,9 @@
 
 obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
 
+audit-class-$(CONFIG_AUDIT) := audit.o
+obj-$(CONFIG_IA32_EMULATION) += $(audit-class-y)
+
 $(obj)/syscall32_syscall.o: \
 	$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
 
diff --git a/arch/x86_64/ia32/audit.c b/arch/x86_64/ia32/audit.c
new file mode 100644
index 0000000..ab94f2e
--- /dev/null
+++ b/arch/x86_64/ia32/audit.c
@@ -0,0 +1,11 @@
+#include <asm-i386/unistd.h>
+
+unsigned ia32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned ia32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index aeb9c56..819e84e 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -35,6 +35,7 @@
 obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer.o
 obj-$(CONFIG_X86_VSMP)		+= vsmp.o
 obj-$(CONFIG_K8_NB)		+= k8.o
+obj-$(CONFIG_AUDIT)		+= audit.o
 
 obj-$(CONFIG_MODULES)		+= module.o
 
diff --git a/arch/x86_64/kernel/audit.c b/arch/x86_64/kernel/audit.c
new file mode 100644
index 0000000..a067aa4
--- /dev/null
+++ b/arch/x86_64/kernel/audit.c
@@ -0,0 +1,29 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+	extern __u32 ia32_dir_class[];
+	extern __u32 ia32_chattr_class[];
+	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
+#endif
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 72f140f..d14fb2d 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -678,16 +678,15 @@
 
 #ifdef CONFIG_DEBUG_RODATA
 
-extern char __start_rodata, __end_rodata;
 void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)&__start_rodata;
+	unsigned long addr = (unsigned long)__start_rodata;
 
-	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
 		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
 
 	printk ("Write protecting the kernel read-only data: %luk\n",
-			(&__end_rodata - &__start_rodata) >> 10);
+			(__end_rodata - __start_rodata) >> 10);
 
 	/*
 	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index dec044c..ea5a049 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -192,7 +192,7 @@
 	/* Make sure this is a valid target state */
 
 	if (!device->flags.power_manageable) {
-		printk(KERN_DEBUG "Device `[%s]is not power manageable",
+		printk(KERN_DEBUG "Device `[%s]' is not power manageable",
 				device->kobj.name);
 		return -ENODEV;
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 39662f0..0a1b1ea 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -50,9 +50,9 @@
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
 static unsigned int debugflags;
-static unsigned int nbds_max = 16;
 #endif /* NDEBUG */
 
+static unsigned int nbds_max = 16;
 static struct nbd_device nbd_dev[MAX_NBD];
 
 /*
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index d75864e..f79f6b5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -19,8 +19,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define AMD76X_REVISION	" Ver: 2.0.0 "  __DATE__
-
+#define AMD76X_REVISION	" Ver: 2.0.1 "  __DATE__
+#define EDAC_MOD_STR	"amd76x_edac"
 
 #define amd76x_printk(level, fmt, arg...) \
 	edac_printk(level, "amd76x", fmt, ##arg)
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 815c3eb..c82bc0e 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define E752X_REVISION	" Ver: 2.0.0 " __DATE__
+#define E752X_REVISION	" Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR	"e752x_edac"
 
 static int force_function_unhide;
 
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 5a5ecd5..310d91b 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -29,7 +29,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define	E7XXX_REVISION " Ver: 2.0.0 " __DATE__
+#define	E7XXX_REVISION " Ver: 2.0.1 " __DATE__
+#define	EDAC_MOD_STR	"e7xxx_edac"
 
 #define e7xxx_printk(level, fmt, arg...) \
 	edac_printk(level, "e7xxx", fmt, ##arg)
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 1be4947..bf6ab8a 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -78,10 +78,6 @@
 
 #endif  /* !CONFIG_EDAC_DEBUG */
 
-#define edac_xstr(s) edac_str(s)
-#define edac_str(s) #s
-#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
-
 #define BIT(x) (1 << (x))
 
 #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
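
The macros deleted here used the classic two-level stringification trick: the outer macro forces KBUILD_BASENAME to expand before the inner # operator quotes it. With kbuild moving from $(*F) to $(basetarget), each EDAC driver now defines EDAC_MOD_STR as a plain literal instead (see the surrounding per-driver hunks). A minimal sketch of the old idiom:

#include <stdio.h>

#define xstr(s) str(s)	/* expand the argument first ... */
#define str(s) #s	/* ... then stringify the expansion */

#define KBUILD_BASENAME amd76x_edac	/* normally injected by kbuild via -D */

int main(void)
{
	/* prints "amd76x_edac"; str(KBUILD_BASENAME) alone would
	 * print the literal text "KBUILD_BASENAME" */
	printf("%s\n", xstr(KBUILD_BASENAME));
	return 0;
}
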
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index e30a4a2..e4bb298 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -16,7 +16,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define  I82860_REVISION " Ver: 2.0.0 " __DATE__
+#define  I82860_REVISION " Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR	"i82860_edac"
 
 #define i82860_printk(level, fmt, arg...) \
 	edac_printk(level, "i82860", fmt, ##arg)
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 9423ee5..161fe09 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -20,7 +20,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define I82875P_REVISION	" Ver: 2.0.0 " __DATE__
+#define I82875P_REVISION	" Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR		"i82875p_edac"
 
 #define i82875p_printk(level, fmt, arg...) \
 	edac_printk(level, "i82875p", fmt, ##arg)
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index a0e248d..a49cf0a 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -22,7 +22,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
-#define R82600_REVISION	" Ver: 2.0.0 " __DATE__
+#define R82600_REVISION	" Ver: 2.0.1 " __DATE__
+#define EDAC_MOD_STR	"r82600_edac"
 
 #define r82600_printk(level, fmt, arg...) \
 	edac_printk(level, "r82600", fmt, ##arg)
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 7fb3635..3cb0442 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -650,6 +650,8 @@
 	}
 	ide_set_hwifdata(hwif, idev);
 
+	hwif->atapi_dma = 1;
+
 	pci_read_config_byte(hwif->pci_dev, 0x50, &conf);
 	if(conf & 1) {
 		idev->smart = 1;
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 9ea67c4..1db9489 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,16 +1,16 @@
 config IPATH_CORE
-	tristate "PathScale InfiniPath Driver"
+	tristate "QLogic InfiniPath Driver"
 	depends on 64BIT && PCI_MSI && NET
 	---help---
-	This is a low-level driver for PathScale InfiniPath host channel
+	This is a low-level driver for QLogic InfiniPath host channel
 	adapters (HCAs) based on the HT-400 and PE-800 chips.
 
 config INFINIBAND_IPATH
-	tristate "PathScale InfiniPath Verbs Driver"
+	tristate "QLogic InfiniPath Verbs Driver"
 	depends on IPATH_CORE && INFINIBAND
 	---help---
 	This is a driver that provides InfiniBand verbs support for
-	PathScale InfiniPath host channel adapters (HCAs).  This
+	QLogic InfiniPath host channel adapters (HCAs).  This
 	allows these devices to be used with both kernel upper level
 	protocols such as IP-over-InfiniBand as well as with userspace
 	applications (in conjunction with InfiniBand userspace access).
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index b4d084a..b0bf728 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,4 +1,4 @@
-EXTRA_CFLAGS += -DIPATH_IDSTR='"PathScale kernel.org driver"' \
+EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
 	-DIPATH_KERN_TYPE=0
 
 obj-$(CONFIG_IPATH_CORE) += ipath_core.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 48a5524..062bd39 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,7 +39,8 @@
  * to communicate between kernel and user code.
  */
 
-/* This is the IEEE-assigned OUI for PathScale, Inc. */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
 #define IPATH_SRC_OUI_1 0x00
 #define IPATH_SRC_OUI_2 0x11
 #define IPATH_SRC_OUI_3 0x75
@@ -96,8 +98,8 @@
 	__u64 sps_hwerrs;
 	/* number of times IB link changed state unexpectedly */
 	__u64 sps_iblink;
-	/* no longer used; left for compatibility */
-	__u64 sps_unused3;
+	/* kernel receive interrupts that didn't read intstat */
+	__u64 sps_fastrcvint;
 	/* number of kernel (port0) packets received */
 	__u64 sps_port0pkts;
 	/* number of "ethernet" packets sent by driver */
@@ -121,8 +123,7 @@
 	__u64 sps_ports;
 	/* list of pkeys (other than default) accepted (0 means not set) */
 	__u16 sps_pkeys[4];
-	/* lids for up to 4 infinipaths, indexed by infinipath # */
-	__u16 sps_lid[4];
+	__u16 sps_unused16[4]; /* available; maintaining compatible layout */
 	/* number of user ports per chip (not IB ports) */
 	__u32 sps_nports;
 	/* not our interrupt, or already handled */
@@ -140,10 +141,8 @@
 	 * packets if ipath not configured, sma/mad, etc.)
 	 */
 	__u64 sps_krdrops;
-	/* mlids for up to 4 infinipaths, indexed by infinipath # */
-	__u16 sps_mlid[4];
 	/* pad for future growth */
-	__u64 __sps_pad[45];
+	__u64 __sps_pad[46];
 };
 
 /*
@@ -310,6 +309,9 @@
 	__u32 spi_rcv_egrchunksize;
 	/* total size of mmap to cover full rcvegrbuffers */
 	__u32 spi_rcv_egrbuftotlen;
+	__u32 spi_filler_for_align;
+	/* address of readonly memory copy of the rcvhdrq tail register. */
+	__u64 spi_rcvhdr_tailaddr;
 } __attribute__ ((aligned(8)));
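
The explicit spi_filler_for_align keeps the following __u64 at an 8-byte offset in every ABI: on i386 a __u64 only needs 4-byte alignment, so without the filler a 32-bit user process and a 64-bit kernel could disagree about the field's offset. A quick offsetof check of the idea (the struct is a cut-down illustration, not the real ipath_base_info):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct base_info_sketch {
	uint32_t egrbuftotlen;
	uint32_t filler_for_align;	/* pins the next field to offset 8 */
	uint64_t rcvhdr_tailaddr;	/* same offset on i386 and x86_64 */
} __attribute__((aligned(8)));

int main(void)
{
	printf("tailaddr at offset %zu\n",
	       offsetof(struct base_info_sketch, rcvhdr_tailaddr));
	return 0;
}
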
 
 
@@ -342,9 +344,9 @@
 /*
  * Similarly, this is the kernel version going back to the user.  It's
  * slightly different, in that we want to tell if the driver was built as
- * part of a PathScale release, or from the driver from OpenIB, kernel.org,
- * or a standard distribution, for support reasons.  The high bit is 0 for
- * non-PathScale, and 1 for PathScale-built/supplied.
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
  *
  * It's returned by the driver to the user code during initialization in the
  * spi_sw_version field of ipath_base_info, so the user code can in turn
@@ -379,13 +381,7 @@
 	 */
 	__u32 spu_rcvhdrsize;
 
-	/*
-	 * cache line aligned (64 byte) user address to
-	 * which the rcvhdrtail register will be written by infinipath
-	 * whenever it changes, so that no chip registers are read in
-	 * the performance path.
-	 */
-	__u64 spu_rcvhdraddr;
+	__u64 spu_unused; /* kept for compatible layout */
 
 	/*
 	 * address of struct base_info to write to
@@ -481,7 +477,7 @@
  * Data layout in I2C flash (for GUID, etc.)
  * All fields are little-endian binary unless otherwise stated
  */
-#define IPATH_FLASH_VERSION 1
+#define IPATH_FLASH_VERSION 2
 struct ipath_flash {
 	/* flash layout version (IPATH_FLASH_VERSION) */
 	__u8 if_fversion;
@@ -489,14 +485,14 @@
 	__u8 if_csum;
 	/*
 	 * valid length (in use, protected by if_csum), including
-	 * if_fversion and if_sum themselves)
+	 * if_fversion and if_csum themselves)
 	 */
 	__u8 if_length;
 	/* the GUID, in network order */
 	__u8 if_guid[8];
 	/* number of GUIDs to use, starting from if_guid */
 	__u8 if_numguid;
-	/* the board serial number, in ASCII */
+	/* the (last 10 characters of) board serial number, in ASCII */
 	char if_serial[12];
 	/* board mfg date (YYYYMMDD ASCII) */
 	char if_mfgdate[8];
@@ -508,8 +504,10 @@
 	__u8 if_powerhour[2];
 	/* ASCII free-form comment field */
 	char if_comment[32];
-	/* 78 bytes used, min flash size is 128 bytes */
-	__u8 if_future[50];
+	/* Backwards compatible prefix for longer QLogic Serial Numbers */
+	char if_sprefix[4];
+	/* 82 bytes used, min flash size is 128 bytes */
+	__u8 if_future[46];
 };
 
 /*
@@ -603,14 +601,118 @@
 #define INFINIPATH_KPF_INTR 0x1
 
 /* SendPIO per-buffer control */
-#define INFINIPATH_SP_LENGTHP1_MASK 0x3FF
-#define INFINIPATH_SP_LENGTHP1_SHIFT 0
-#define INFINIPATH_SP_INTR    0x80000000
-#define INFINIPATH_SP_TEST    0x40000000
-#define INFINIPATH_SP_TESTEBP 0x20000000
+#define INFINIPATH_SP_TEST    0x40
+#define INFINIPATH_SP_TESTEBP 0x20
 
 /* SendPIOAvail bits */
 #define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
 #define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
 
+/* infinipath header format */
+struct ipath_header {
+	/*
+	 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
+	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
+	 * Port 3, TID 11, offset 14.
+	 */
+	__le32 ver_port_tid_offset;
+	__le16 chksum;
+	__le16 pkt_flags;
+};
+
+/* infinipath user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ infinipath.
+ */
+struct ipath_message_header {
+	__be16 lrh[4];
+	__be32 bth[3];
+	/* fields below this point are in host byte order */
+	struct ipath_header iph;
+	__u8 sub_opcode;
+};
+
+/* infinipath ethernet header format */
+struct ether_header {
+	__be16 lrh[4];
+	__be32 bth[3];
+	struct ipath_header iph;
+	__u8 sub_opcode;
+	__u8 cmd;
+	__be16 lid;
+	__u16 mac[3];
+	__u8 frag_num;
+	__u8 seq_num;
+	__le32 len;
+	/* MUST be of word size due to PIO write requirements */
+	__le32 csum;
+	__le16 csum_offset;
+	__le16 flags;
+	__u16 first_2_bytes;
+	__u8 unused[2];		/* currently unused */
+};
+
+
+/* IB - LRH header consts */
+#define IPATH_LRH_GRH 0x0003	/* 1. word of IB LRH - next header: GRH */
+#define IPATH_LRH_BTH 0x0002	/* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define IPATH_DEFAULT_P_KEY 0xFFFF
+#define IPATH_PERMISSIVE_LID 0xFFFF
+#define IPATH_AETH_CREDIT_SHIFT 24
+#define IPATH_AETH_CREDIT_MASK 0x1F
+#define IPATH_AETH_CREDIT_INVAL 0x1F
+#define IPATH_PSN_MASK 0xFFFFFF
+#define IPATH_MSN_MASK 0xFFFFFF
+#define IPATH_QPN_MASK 0xFFFFFF
+#define IPATH_MULTICAST_LID_BASE 0xC000
+#define IPATH_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from infinipath) */
+#define RCVHQ_RCV_TYPE_EXPECTED  0
+#define RCVHQ_RCV_TYPE_EAGER     1
+#define RCVHQ_RCV_TYPE_NON_KD    2
+#define RCVHQ_RCV_TYPE_ERROR     3
+
+
+/* sub OpCodes - ith4x  */
+#define IPATH_ITH4X_OPCODE_ENCAP 0x81
+#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
+
+#define IPATH_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
+{
+	return __le32_to_cpu(rbuf[1]);
+}
+
+static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
+	    & INFINIPATH_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
+{
+	return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
+		& INFINIPATH_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
+{
+	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
+	    & INFINIPATH_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
+{
+	return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
+	    & INFINIPATH_I_VERS_MASK;
+}
+
 #endif				/* _IPATH_COMMON_H */
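
The ipath_hdrget_* helpers above all have the same shape: convert a little-endian receive-header word to CPU order, shift, and mask. A standalone sketch of the decode pattern (the shift and mask values are invented for illustration; the real ones are the INFINIPATH_RHF_* constants):

#include <stdio.h>
#include <stdint.h>

#define RHF_RCVTYPE_SHIFT	11	/* illustrative, not the real value */
#define RHF_RCVTYPE_MASK	0x7

static inline uint32_t hdrget_rcv_type(uint32_t rhf_word_cpu)
{
	return (rhf_word_cpu >> RHF_RCVTYPE_SHIFT) & RHF_RCVTYPE_MASK;
}

int main(void)
{
	uint32_t rhf = 1u << RHF_RCVTYPE_SHIFT;	/* fake RCVHQ_RCV_TYPE_EAGER */

	printf("rcv type = %u\n", hdrget_rcv_type(rhf));
	return 0;
}
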
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 7ece113..3efee34 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -157,10 +158,21 @@
 			      struct ib_ucontext *context,
 			      struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibdev);
 	struct ipath_cq *cq;
 	struct ib_wc *wc;
 	struct ib_cq *ret;
 
+	if (entries > ib_ipath_max_cqes) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	/*
 	 * Need to use vmalloc() if we want to support large #s of
 	 * entries.
@@ -196,6 +208,8 @@
 
 	ret = &cq->ibcq;
 
+	dev->n_cqs_allocated++;
+
 bail:
 	return ret;
 }
@@ -210,9 +224,11 @@
  */
 int ipath_destroy_cq(struct ib_cq *ibcq)
 {
+	struct ipath_ibdev *dev = to_idev(ibcq->device);
 	struct ipath_cq *cq = to_icq(ibcq);
 
 	tasklet_kill(&cq->comptask);
+	dev->n_cqs_allocated--;
 	vfree(cq->queue);
 	kfree(cq);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 4676238..f415bed 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 28ddceb..147dd89 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -43,10 +44,9 @@
 #include <linux/pci.h>
 #include <asm/uaccess.h>
 
-#include "ipath_common.h"
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 int ipath_diag_inuse;
 static int diag_set_link;
@@ -66,18 +66,20 @@
 	.release = ipath_diag_release
 };
 
-static struct cdev *diag_cdev;
-static struct class_device *diag_class_dev;
-
-int ipath_diag_init(void)
+int ipath_diag_add(struct ipath_devdata *dd)
 {
-	return ipath_cdev_init(IPATH_DIAG_MINOR, "ipath_diag",
-			       &diag_file_ops, &diag_cdev, &diag_class_dev);
+	char name[16];
+
+	snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
+
+	return ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
+			       &diag_file_ops, &dd->diag_cdev,
+			       &dd->diag_class_dev);
 }
 
-void ipath_diag_cleanup(void)
+void ipath_diag_remove(struct ipath_devdata *dd)
 {
-	ipath_cdev_cleanup(&diag_cdev, &diag_class_dev);
+	ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev);
 }
 
 /**
@@ -101,8 +103,7 @@
 	int ret;
 
 	/* not very efficient, but it works for now */
-	if (reg_addr < dd->ipath_kregbase ||
-	    reg_end > dd->ipath_kregend) {
+	if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -113,7 +114,7 @@
 			goto bail;
 		}
 		reg_addr++;
-		uaddr++;
+		uaddr += sizeof(u64);
 	}
 	ret = 0;
 bail:
@@ -139,8 +140,7 @@
 	int ret;
 
 	/* not very efficient, but it works for now */
-	if (reg_addr < dd->ipath_kregbase ||
-	    reg_end > dd->ipath_kregend) {
+	if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -153,7 +153,7 @@
 		writeq(data, reg_addr);
 
 		reg_addr++;
-		uaddr++;
+		uaddr += sizeof(u64);
 	}
 	ret = 0;
 bail:
@@ -191,7 +191,8 @@
 		}
 
 		reg_addr++;
-		uaddr++;
+		uaddr += sizeof(u32);
+
 	}
 	ret = 0;
 bail:
@@ -230,7 +231,7 @@
 		writel(data, reg_addr);
 
 		reg_addr++;
-		uaddr++;
+		uaddr += sizeof(u32);
 	}
 	ret = 0;
 bail:
@@ -239,59 +240,45 @@
 
 static int ipath_diag_open(struct inode *in, struct file *fp)
 {
+	int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
 	struct ipath_devdata *dd;
-	int unit = 0; /* XXX this is bogus */
-	unsigned long flags;
 	int ret;
 
-	dd = ipath_lookup(unit);
-
 	mutex_lock(&ipath_mutex);
-	spin_lock_irqsave(&ipath_devs_lock, flags);
 
 	if (ipath_diag_inuse) {
 		ret = -EBUSY;
 		goto bail;
 	}
 
-	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-		/*
-		 * we need at least one infinipath device to be present
-		 * (don't use INITTED, because we want to be able to open
-		 * even if device is in freeze mode, which cleared INITTED).
-		 * There is a small amount of risk to this, which is why we
-		 * also verify kregbase is set.
-		 */
+	dd = ipath_lookup(unit);
 
-		if (!(dd->ipath_flags & IPATH_PRESENT) ||
-		    !dd->ipath_kregbase)
-			continue;
-
-		ipath_diag_inuse = 1;
-		diag_set_link = 0;
-		ret = 0;
+	if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
+	    !dd->ipath_kregbase) {
+		ret = -ENODEV;
 		goto bail;
 	}
 
-	ret = -ENODEV;
-
-bail:
-	spin_unlock_irqrestore(&ipath_devs_lock, flags);
+	fp->private_data = dd;
+	ipath_diag_inuse = 1;
+	diag_set_link = 0;
+	ret = 0;
 
 	/* Only expose a way to reset the device if we
 	   make it into diag mode. */
-	if (ret == 0)
-		ipath_expose_reset(&dd->pcidev->dev);
+	ipath_expose_reset(&dd->pcidev->dev);
 
+bail:
 	mutex_unlock(&ipath_mutex);
 
 	return ret;
 }
 
-static int ipath_diag_release(struct inode *i, struct file *f)
+static int ipath_diag_release(struct inode *in, struct file *fp)
 {
 	mutex_lock(&ipath_mutex);
 	ipath_diag_inuse = 0;
+	fp->private_data = NULL;
 	mutex_unlock(&ipath_mutex);
 	return 0;
 }
@@ -299,17 +286,10 @@
 static ssize_t ipath_diag_read(struct file *fp, char __user *data,
 			       size_t count, loff_t *off)
 {
-	int unit = 0; /* XXX provide for reads on other units some day */
-	struct ipath_devdata *dd;
+	struct ipath_devdata *dd = fp->private_data;
 	void __iomem *kreg_base;
 	ssize_t ret;
 
-	dd = ipath_lookup(unit);
-	if (!dd) {
-		ret = -ENODEV;
-		goto bail;
-	}
-
 	kreg_base = dd->ipath_kregbase;
 
 	if (count == 0)
@@ -328,23 +308,16 @@
 		ret = count;
 	}
 
-bail:
 	return ret;
 }
 
 static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
 				size_t count, loff_t *off)
 {
-	int unit = 0; /* XXX this is bogus */
-	struct ipath_devdata *dd;
+	struct ipath_devdata *dd = fp->private_data;
 	void __iomem *kreg_base;
 	ssize_t ret;
 
-	dd = ipath_lookup(unit);
-	if (!dd) {
-		ret = -ENODEV;
-		goto bail;
-	}
 	kreg_base = dd->ipath_kregbase;
 
 	if (count == 0)
@@ -363,6 +336,5 @@
 		ret = count;
 	}
 
-bail:
 	return ret;
 }
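
The uaddr changes throughout this file fix a stride bug: reg_addr is a u64 pointer, so reg_addr++ advances eight bytes, while uaddr is a byte-granular user address that was only advancing by one, making every copy after the first land on overlapping destinations. A condensed illustration of the mismatch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t regs[4];
	uint64_t *reg_addr = regs;
	uintptr_t uaddr = 0x1000;	/* illustrative user address */

	for (int i = 0; i < 4; i++) {
		reg_addr++;			/* advances sizeof(u64) == 8 bytes */
		uaddr += sizeof(uint64_t);	/* must match; "uaddr++" would not */
	}
	printf("registers copied: %td, bytes covered: %lu\n",
	       reg_addr - regs, (unsigned long)(uaddr - 0x1000));
	return 0;
}
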
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index e4b897f..6efc56b 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,8 +39,8 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 static void ipath_update_pio_bufs(struct ipath_devdata *);
 
@@ -52,7 +53,7 @@
 
 EXPORT_SYMBOL_GPL(ipath_get_unit_name);
 
-#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
 #define PFX IPATH_DRV_NAME ": "
 
 /*
@@ -74,8 +75,8 @@
 EXPORT_SYMBOL_GPL(ipath_debug);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("PathScale <support@pathscale.com>");
-MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+MODULE_AUTHOR("QLogic <support@pathscale.com>");
+MODULE_DESCRIPTION("QLogic InfiniPath driver");
 
 const char *ipath_ibcstatus_str[] = {
 	"Disabled",
@@ -130,14 +131,6 @@
 	.id_table = ipath_pci_tbl,
 };
 
-/*
- * This is where port 0's rcvhdrtail register is written back; we also
- * want nothing else sharing the cache line, so make it a cache line
- * in size.  Used for all units.
- */
-volatile __le64 *ipath_port0_rcvhdrtail;
-dma_addr_t ipath_port0_rcvhdrtail_dma;
-static int port0_rcvhdrtail_refs;
 
 static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
 			     u32 *bar0, u32 *bar1)
@@ -170,14 +163,13 @@
 		list_del(&dd->ipath_list);
 		spin_unlock_irqrestore(&ipath_devs_lock, flags);
 	}
-	dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr);
+	vfree(dd);
 }
 
 static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 {
 	unsigned long flags;
 	struct ipath_devdata *dd;
-	dma_addr_t dma_addr;
 	int ret;
 
 	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
@@ -185,15 +177,12 @@
 		goto bail;
 	}
 
-	dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr,
-				GFP_KERNEL);
-
+	dd = vmalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
-
-	dd->ipath_dma_addr = dma_addr;
+	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;
 
 	spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -271,47 +260,6 @@
 	return nunits;
 }
 
-static int init_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-	int ret;
-
-	mutex_lock(&ipath_mutex);
-
-	if (!ipath_port0_rcvhdrtail) {
-		ipath_port0_rcvhdrtail =
-			dma_alloc_coherent(&pdev->dev,
-					   IPATH_PORT0_RCVHDRTAIL_SIZE,
-					   &ipath_port0_rcvhdrtail_dma,
-					   GFP_KERNEL);
-
-		if (!ipath_port0_rcvhdrtail) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-	}
-	port0_rcvhdrtail_refs++;
-	ret = 0;
-
-bail:
-	mutex_unlock(&ipath_mutex);
-
-	return ret;
-}
-
-static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev)
-{
-	mutex_lock(&ipath_mutex);
-
-	if (!--port0_rcvhdrtail_refs) {
-		dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE,
-				  (void *) ipath_port0_rcvhdrtail,
-				  ipath_port0_rcvhdrtail_dma);
-		ipath_port0_rcvhdrtail = NULL;
-	}
-
-	mutex_unlock(&ipath_mutex);
-}
-
 /*
  * These next two routines are placeholders in case we don't have per-arch
  * code for controlling write combining.  If explicit control of write
@@ -336,20 +284,12 @@
 	u32 bar0 = 0, bar1 = 0;
 	u8 rev;
 
-	ret = init_port0_rcvhdrtail(pdev);
-	if (ret < 0) {
-		printk(KERN_ERR IPATH_DRV_NAME
-		       ": Could not allocate port0_rcvhdrtail: error %d\n",
-		       -ret);
-		goto bail;
-	}
-
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
 		ret = PTR_ERR(dd);
 		printk(KERN_ERR IPATH_DRV_NAME
 		       ": Could not allocate devdata: error %d\n", -ret);
-		goto bail_rcvhdrtail;
+		goto bail;
 	}
 
 	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
@@ -424,12 +364,29 @@
 		 */
 		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (ret) {
-			dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
-				 "fails: %d\n", dd->ipath_unit, ret);
+			dev_info(&pdev->dev,
+				"Unable to set DMA mask for unit %u: %d\n",
+				dd->ipath_unit, ret);
 			goto bail_regions;
 		}
-		else
+		else {
 			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
+			ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+			if (ret)
+				dev_info(&pdev->dev,
+					"Unable to set DMA consistent mask "
+					"for unit %u: %d\n",
+					dd->ipath_unit, ret);
+
+		}
+	}
+	else {
+		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+		if (ret)
+			dev_info(&pdev->dev,
+				"Unable to set DMA consistent mask "
+				"for unit %u: %d\n",
+				dd->ipath_unit, ret);
 	}
 
 	pci_set_master(pdev);
@@ -452,7 +409,7 @@
 		ipath_init_pe800_funcs(dd);
 		break;
 	default:
-		ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, "
+		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
 			      "failing\n", ent->device);
 		return -ENODEV;
 	}
@@ -495,16 +452,16 @@
 		((void __iomem *)dd->ipath_kregbase + len);
 	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */
 	/* for user mmap */
-	dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr);
-	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p "
-		   "kregvirt %p\n", addr, dd->ipath_kregbase,
-		   dd->ipath_kregvirt);
+	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
+		   addr, dd->ipath_kregbase);
 
 	/*
 	 * clear ipath_flags here instead of in ipath_init_chip as it is set
 	 * by ipath_setup_htconfig.
 	 */
 	dd->ipath_flags = 0;
+	dd->ipath_lli_counter = 0;
+	dd->ipath_lli_errors = 0;
 
 	if (dd->ipath_f_bus(dd, pdev))
 		ipath_dev_err(dd, "Failed to setup config space; "
@@ -545,6 +502,7 @@
 	ipath_device_create_group(&pdev->dev, dd);
 	ipathfs_add_device(dd);
 	ipath_user_add(dd);
+	ipath_diag_add(dd);
 	ipath_layer_add(dd);
 
 	goto bail;
@@ -561,9 +519,6 @@
 bail_devdata:
 	ipath_free_devdata(pdev, dd);
 
-bail_rcvhdrtail:
-	cleanup_port0_rcvhdrtail(pdev);
-
 bail:
 	return ret;
 }
@@ -577,8 +532,9 @@
 		return;
 
 	dd = pci_get_drvdata(pdev);
-	ipath_layer_del(dd);
-	ipath_user_del(dd);
+	ipath_layer_remove(dd);
+	ipath_diag_remove(dd);
+	ipath_user_remove(dd);
 	ipathfs_remove_device(dd);
 	ipath_device_remove_group(&pdev->dev, dd);
 	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
@@ -594,7 +550,6 @@
 	pci_disable_device(pdev);
 
 	ipath_free_devdata(pdev, dd);
-	cleanup_port0_rcvhdrtail(pdev);
 }
 
 /* general driver use */
@@ -868,7 +823,8 @@
 	u8 pad, *bthbytes;
 	struct sk_buff *skb, *nskb;
 
-	if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) {
+	if (dd->ipath_port0_skbs &&
+			hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
 		/*
 		 * Allocate a new sk_buff to replace the one we give
 		 * to the network stack.
@@ -899,7 +855,7 @@
 		/* another ether packet received */
 		ipath_stats.sps_ether_rpkts++;
 	}
-	else if (hdr->sub_opcode == OPCODE_LID_ARP)
+	else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
 		__ipath_layer_rcv_lid(dd, hdr);
 }
 
@@ -916,8 +872,8 @@
 	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
 	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
 	u32 etail = -1, l, hdrqtail;
-	struct ips_message_header *hdr;
-	u32 eflags, i, etype, tlen, pkttot = 0;
+	struct ipath_message_header *hdr;
+	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
 	static u64 totcalls;	/* stats, may eventually remove */
 	char emsg[128];
 
@@ -931,24 +887,18 @@
 	if (test_and_set_bit(0, &dd->ipath_rcv_pending))
 		goto bail;
 
-	if (dd->ipath_port0head ==
-	    (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+	l = dd->ipath_port0head;
+	hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
+	if (l == hdrqtail)
 		goto done;
 
-gotmore:
-	/*
-	 * read only once at start.  If in flood situation, this helps
-	 * performance slightly.  If more arrive while we are processing,
-	 * we'll come back here and do them
-	 */
-	hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
-
-	for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
+reloop:
+	for (i = 0; l != hdrqtail; i++) {
 		u32 qp;
 		u8 *bthbytes;
 
 		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
-		hdr = (struct ips_message_header *)&rc[1];
+		hdr = (struct ipath_message_header *)&rc[1];
 		/*
 		 * could make a network order version of IPATH_KD_QP, and
 		 * do the obvious shift before masking to speed this up.
@@ -956,10 +906,10 @@
 		qp = ntohl(hdr->bth[1]) & 0xffffff;
 		bthbytes = (u8 *) hdr->bth;
 
-		eflags = ips_get_hdr_err_flags((__le32 *) rc);
-		etype = ips_get_rcv_type((__le32 *) rc);
+		eflags = ipath_hdrget_err_flags((__le32 *) rc);
+		etype = ipath_hdrget_rcv_type((__le32 *) rc);
 		/* total length */
-		tlen = ips_get_length_in_bytes((__le32 *) rc);
+		tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
 		ebuf = NULL;
 		if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
 			/*
@@ -969,7 +919,7 @@
 			 * set ebuf (so we try to copy data) unless the
 			 * length requires it.
 			 */
-			etail = ips_get_index((__le32 *) rc);
+			etail = ipath_hdrget_index((__le32 *) rc);
 			if (tlen > sizeof(*hdr) ||
 			    etype == RCVHQ_RCV_TYPE_NON_KD)
 				ebuf = ipath_get_egrbuf(dd, etail, 0);
@@ -981,7 +931,7 @@
 		 */
 
 		if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
-		    RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver(
+		    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
 			    hdr->iph.ver_port_tid_offset) !=
 		    IPS_PROTO_VERSION) {
 			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
@@ -994,7 +944,19 @@
 			ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
 				   "tlen=%x opcode=%x egridx=%x: %s\n",
 				   eflags, l, etype, tlen, bthbytes[0],
-				   ips_get_index((__le32 *) rc), emsg);
+				   ipath_hdrget_index((__le32 *) rc), emsg);
+			/* Count local link integrity errors. */
+			if (eflags & (INFINIPATH_RHF_H_ICRCERR |
+				      INFINIPATH_RHF_H_VCRCERR)) {
+				u8 n = (dd->ipath_ibcctrl >>
+					INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+					INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+
+				if (++dd->ipath_lli_counter > n) {
+					dd->ipath_lli_counter = 0;
+					dd->ipath_lli_errors++;
+				}
+			}
 		} else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 				int ret = __ipath_verbs_rcv(dd, rc + 1,
 							    ebuf, tlen);
@@ -1002,6 +964,9 @@
 					ipath_cdbg(VERBOSE,
 						   "received IB packet, "
 						   "not SMA (QP=%x)\n", qp);
+				if (dd->ipath_lli_counter)
+					dd->ipath_lli_counter--;
+
 		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
 			if (qp == IPATH_KD_QP &&
 			    bthbytes[0] == ipath_layer_rcv_opcode &&
@@ -1054,25 +1019,49 @@
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
-		/*
-		 * update for each packet, to help prevent overflows if we
-		 * have lots of packets.
-		 */
-		(void)ipath_write_ureg(dd, ur_rcvhdrhead,
-				       dd->ipath_rhdrhead_intr_off | l, 0);
 		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
-			(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
-					       etail, 0);
+			updegr = 1;
+		/*
+		 * update head regs on last packet, and every 16 packets.
+		 * Reduce bus traffic, while still trying to prevent
+		 * rcvhdrq overflows, for when the queue is nearly full
+		 */
+		if (l == hdrqtail || (i && !(i & 0xf))) {
+			u64 lval;
+			if (l == hdrqtail) /* PE-800 interrupt only on last */
+				lval = dd->ipath_rhdrhead_intr_off | l;
+			else
+				lval = l;
+			(void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+			if (updegr) {
+				(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
+						       etail, 0);
+				updegr = 0;
+			}
+		}
+	}
+
+	if (!dd->ipath_rhdrhead_intr_off && !reloop) {
+		/* HT-400 workaround; we can have a race clearing chip
+		 * interrupt with another interrupt about to be delivered,
+		 * and can clear it before it is delivered on the GPIO
+		 * workaround.  By doing the extra check here for the
+		 * in-memory tail register updating while we were doing
+		 * earlier packets, we "almost" guarantee we have covered
+		 * that case.
+		 */
+		u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+		if (hqtail != hdrqtail) {
+			hdrqtail = hqtail;
+			reloop = 1; /* loop 1 extra time at most */
+			goto reloop;
+		}
 	}
 
 	pkttot += i;
 
 	dd->ipath_port0head = l;
 
-	if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
-		/* more arrived while we handled first batch */
-		goto gotmore;
-
 	if (pkttot > ipath_stats.sps_maxpkts_call)
 		ipath_stats.sps_maxpkts_call = pkttot;
 	ipath_stats.sps_port0pkts += pkttot;
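
The receive loop above now writes the head registers only on the final packet and every sixteenth packet in between, instead of once per packet, trading a little rcvhdrq headroom for much less bus traffic under load. A toy model of how often the write-back condition fires (the queue geometry is made up):

#include <stdio.h>

int main(void)
{
	int tail = 100, writes = 0;	/* illustrative packet count */

	for (int i = 0, head = 0; head != tail; i++) {
		head++;	/* consume one packet */
		/* mirrors: if (l == hdrqtail || (i && !(i & 0xf))) */
		if (head == tail || (i && !(i & 0xf)))
			writes++;	/* stands in for ipath_write_ureg() */
	}
	printf("head-register writes for %d packets: %d\n", tail, writes);
	return 0;
}
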
@@ -1369,26 +1358,20 @@
  * @dd: the infinipath device
  * @pd: the port data
  *
- * this *must* be physically contiguous memory, and for now,
- * that limits it to what kmalloc can do.
+ * this must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
  */
 int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 			 struct ipath_portdata *pd)
 {
-	int ret = 0, amt;
+	int ret = 0;
 
-	amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-		    sizeof(u32), PAGE_SIZE);
 	if (!pd->port_rcvhdrq) {
-		/*
-		 * not using REPEAT isn't viable; at 128KB, we can easily
-		 * fail this.  The problem with REPEAT is we can block here
-		 * "forever".  There isn't an inbetween, unfortunately.  We
-		 * could reduce the risk by never freeing the rcvhdrq except
-		 * at unload, but even then, the first time a port is used,
-		 * we could delay for some time...
-		 */
+		dma_addr_t phys_hdrqtail;
 		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
+				sizeof(u32), PAGE_SIZE);
 
 		pd->port_rcvhdrq = dma_alloc_coherent(
 			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
@@ -1401,6 +1384,16 @@
 			ret = -ENOMEM;
 			goto bail;
 		}
+		pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+			&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
+		if (!pd->port_rcvhdrtail_kvaddr) {
+			ipath_dev_err(dd, "attempt to allocate 1 page "
+				      "for port %u rcvhdrqtailaddr failed\n",
+				      pd->port_port);
+			ret = -ENOMEM;
+			goto bail;
+		}
+		pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
 
 		pd->port_rcvhdrq_size = amt;
 
@@ -1410,20 +1403,28 @@
 			   (unsigned long) pd->port_rcvhdrq_phys,
 			   (unsigned long) pd->port_rcvhdrq_size,
 			   pd->port_port);
-	} else {
-		/*
-		 * clear for security, sanity, and/or debugging, each
-		 * time we reuse
-		 */
-		memset(pd->port_rcvhdrq, 0, amt);
+
+		ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
+			   pd->port_port,
+			   (unsigned long long) phys_hdrqtail);
 	}
+	else
+		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
+			   "hdrtailaddr@%p %llx physical\n",
+			   pd->port_port, pd->port_rcvhdrq,
+			   pd->port_rcvhdrq_phys, pd->port_rcvhdrtail_kvaddr,
+			   (unsigned long long)pd->port_rcvhdrqtailaddr_phys);
+
+	/* clear for security and sanity on each use */
+	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
+	memset((void *)pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
 
 	/*
 	 * tell chip each time we init it, even if we are re-using previous
-	 * memory (we zero it at process close)
+	 * memory (we zero the register at process close)
 	 */
-	ipath_cdbg(VERBOSE, "writing port %d rcvhdraddr as %lx\n",
-		   pd->port_port, (unsigned long) pd->port_rcvhdrq_phys);
+	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
+			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
 	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
 			      pd->port_port, pd->port_rcvhdrq_phys);
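
The allocation above follows the standard coherent-DMA pattern: one large
buffer for the queue itself, plus a separate page the chip DMAs the tail
index into.  A reduced sketch of that pattern, with hypothetical parameter
names and the per-port error logging trimmed:

	#include <linux/dma-mapping.h>

	static int alloc_hdrq(struct device *dev, size_t amt, void **hdrq,
			      dma_addr_t *hdrq_phys, void **tail,
			      dma_addr_t *tail_phys)
	{
		*hdrq = dma_alloc_coherent(dev, amt, hdrq_phys,
					   GFP_USER | __GFP_COMP);
		if (!*hdrq)
			return -ENOMEM;
		*tail = dma_alloc_coherent(dev, PAGE_SIZE, tail_phys,
					   GFP_KERNEL);
		if (!*tail) {
			dma_free_coherent(dev, amt, *hdrq, *hdrq_phys);
			*hdrq = NULL;
			return -ENOMEM;
		}
		return 0;
	}
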
 
@@ -1511,15 +1512,27 @@
 		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
 		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
 	};
+	int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
+			INFINIPATH_IBCC_LINKCMD_MASK;
+
 	ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
 		   "is %s\n", dd->ipath_unit,
-		   what[(which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
-			INFINIPATH_IBCC_LINKCMD_MASK],
+		   what[linkcmd],
 		   ipath_ibcstatus_str[
 			   (ipath_read_kreg64
 			    (dd, dd->ipath_kregs->kr_ibcstatus) >>
 			    INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
 			   INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
+	/*
+	 * Flush all queued sends when going to DOWN or INIT, to be sure
+	 * that they don't block SMA and other MAD packets.
+	 */
+	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 INFINIPATH_S_ABORT);
+		ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+				     (unsigned)(dd->ipath_piobcnt2k +
+						dd->ipath_piobcnt4k) -
+				     dd->ipath_lastport_piobuf);
+	}
 
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
 			 dd->ipath_ibcctrl | which);
@@ -1638,7 +1651,7 @@
 	/* disable IBC */
 	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-			 dd->ipath_control);
+			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
 
 	/*
 	 * clear SerdesEnable and turn the leds off; do this here because
@@ -1667,60 +1680,54 @@
 /**
  * ipath_free_pddata - free a port's allocated data
  * @dd: the infinipath device
- * @port: the port
- * @freehdrq: free the port data structure if true
+ * @pd: the portdata structure
  *
- * when closing, free up any allocated data for a port, if the
- * reference count goes to zero
- * Note: this also optionally frees the portdata itself!
- * Any changes here have to be matched up with the reinit case
- * of ipath_init_chip(), which calls this routine on reinit after reset.
+ * Free up any allocated data for a port.
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of port data, because it is called after ipath_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ * (The only exception to global state is freeing port 0's port0_skbs.)
  */
-void ipath_free_pddata(struct ipath_devdata *dd, u32 port, int freehdrq)
+void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
 {
-	struct ipath_portdata *pd = dd->ipath_pd[port];
-
 	if (!pd)
 		return;
-	if (freehdrq)
-		/*
-		 * only clear and free portdata if we are going to also
-		 * release the hdrq, otherwise we leak the hdrq on each
-		 * open/close cycle
-		 */
-		dd->ipath_pd[port] = NULL;
-	if (freehdrq && pd->port_rcvhdrq) {
+
+	if (pd->port_rcvhdrq) {
 		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
 			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
 			   (unsigned long) pd->port_rcvhdrq_size);
 		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
 				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
 		pd->port_rcvhdrq = NULL;
-	}
-	if (port && pd->port_rcvegrbuf) {
-		/* always free this */
-		if (pd->port_rcvegrbuf) {
-			unsigned e;
-
-			for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-				void *base = pd->port_rcvegrbuf[e];
-				size_t size = pd->port_rcvegrbuf_size;
-
-				ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-					   "chunk %u/%u\n", base,
-					   (unsigned long) size,
-					   e, pd->port_rcvegrbuf_chunks);
-				dma_free_coherent(
-					&dd->pcidev->dev, size, base,
-					pd->port_rcvegrbuf_phys[e]);
-			}
-			vfree(pd->port_rcvegrbuf);
-			pd->port_rcvegrbuf = NULL;
-			vfree(pd->port_rcvegrbuf_phys);
-			pd->port_rcvegrbuf_phys = NULL;
+		if (pd->port_rcvhdrtail_kvaddr) {
+			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+					 (void *)pd->port_rcvhdrtail_kvaddr,
+					 pd->port_rcvhdrqtailaddr_phys);
+			pd->port_rcvhdrtail_kvaddr = NULL;
 		}
+	}
+	if (pd->port_port && pd->port_rcvegrbuf) {
+		unsigned e;
+
+		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
+			void *base = pd->port_rcvegrbuf[e];
+			size_t size = pd->port_rcvegrbuf_size;
+
+			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
+				   "chunk %u/%u\n", base,
+				   (unsigned long) size,
+				   e, pd->port_rcvegrbuf_chunks);
+			dma_free_coherent(&dd->pcidev->dev, size,
+				base, pd->port_rcvegrbuf_phys[e]);
+		}
+		vfree(pd->port_rcvegrbuf);
+		pd->port_rcvegrbuf = NULL;
+		vfree(pd->port_rcvegrbuf_phys);
+		pd->port_rcvegrbuf_phys = NULL;
 		pd->port_rcvegrbuf_chunks = 0;
-	} else if (port == 0 && dd->ipath_port0_skbs) {
+	} else if (pd->port_port == 0 && dd->ipath_port0_skbs) {
 		unsigned e;
 		struct sk_buff **skbs = dd->ipath_port0_skbs;
 
@@ -1732,10 +1739,8 @@
 				dev_kfree_skb(skbs[e]);
 		vfree(skbs);
 	}
-	if (freehdrq) {
-		kfree(pd->port_tid_pg_list);
-		kfree(pd);
-	}
+	kfree(pd->port_tid_pg_list);
+	kfree(pd);
 }
 
 static int __init infinipath_init(void)
@@ -1806,7 +1811,6 @@
 			 * re-init
 			 */
 			dd->ipath_kregbase = NULL;
-			dd->ipath_kregvirt = NULL;
 			dd->ipath_uregbase = 0;
 			dd->ipath_sregbase = 0;
 			dd->ipath_cregbase = 0;
@@ -1821,6 +1825,12 @@
 				  dd->ipath_pioavailregs_phys);
 		dd->ipath_pioavailregs_dma = NULL;
 	}
+	if (dd->ipath_dummy_hdrq) {
+		dma_free_coherent(&dd->pcidev->dev,
+			dd->ipath_pd[0]->port_rcvhdrq_size,
+			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
+		dd->ipath_dummy_hdrq = NULL;
+	}
 
 	if (dd->ipath_pageshadow) {
 		struct page **tmpp = dd->ipath_pageshadow;
@@ -1861,10 +1871,14 @@
 
 	/*
 	 * free any resources still in use (usually just kernel ports)
-	 * at unload
+	 * at unload; we loop over portcnt, not cfgports, because cfgports
+	 * could have changed while we were loaded.
 	 */
-	for (port = 0; port < dd->ipath_cfgports; port++)
-		ipath_free_pddata(dd, port, 1);
+	for (port = 0; port < dd->ipath_portcnt; port++) {
+		struct ipath_portdata *pd = dd->ipath_pd[port];
+		dd->ipath_pd[port] = NULL;
+		ipath_free_pddata(dd, pd);
+	}
 	kfree(dd->ipath_pd);
 	/*
 	 * debuggability, in case some cleanup path tries to use it
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index a2f1cea..3313356 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -600,8 +601,31 @@
 		guid = *(__be64 *) ifp->if_guid;
 	dd->ipath_guid = guid;
 	dd->ipath_nguid = ifp->if_numguid;
-	memcpy(dd->ipath_serial, ifp->if_serial,
-	       sizeof(ifp->if_serial));
+	/*
+	 * Things are slightly complicated by the desire to transparently
+	 * support both the PathScale 10-digit serial number and the QLogic
+	 * 13-character version.
+	 */
+	if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
+		&& ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
+		/* This board has a serial-number prefix, which is stored
+		 * elsewhere for backward-compatibility.
+		 */
+		char *snp = dd->ipath_serial;
+		int len;
+		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+		snp[sizeof ifp->if_sprefix] = '\0';
+		len = strlen(snp);
+		snp += len;
+		len = (sizeof dd->ipath_serial) - len;
+		if (len > sizeof ifp->if_serial)
+			len = sizeof ifp->if_serial;
+		memcpy(snp, ifp->if_serial, len);
+	} else
+		memcpy(dd->ipath_serial, ifp->if_serial,
+		       sizeof ifp->if_serial);
+
 	ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
 		   (unsigned long long) be64_to_cpu(dd->ipath_guid));
 
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index ada267e4..bbaa70e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -38,8 +39,8 @@
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 static int ipath_open(struct inode *, struct file *);
 static int ipath_close(struct inode *, struct file *);
@@ -122,6 +123,7 @@
 	 * on to yet another method of dealing with this
 	 */
 	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
+	kinfo->spi_rcvhdr_tailaddr = (u64)pd->port_rcvhdrqtailaddr_phys;
 	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
 	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
 	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
@@ -456,7 +458,7 @@
 	u16 lkey = key & 0x7FFF;
 	int ret;
 
-	if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
+	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
 		/* nothing to do; this key always valid */
 		ret = 0;
 		goto bail;
@@ -704,6 +706,15 @@
 	unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
 	size_t size;
 	int ret;
+	gfp_t gfp_flags;
+
+	/*
+	 * GFP_USER, but without GFP_FS, so the buffer cache can be
+	 * coalesced (we hope); otherwise, even at order 4, heavy
+	 * filesystem activity makes these allocations fail.  We also
+	 * use compound pages (__GFP_COMP).
+	 */
+	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
 	egrcnt = dd->ipath_rcvegrcnt;
 	/* TID number offset for this port */
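
The fixed 32KB chunk size and GFP flags above drive the allocation loop
later in this hunk; its overall shape, pulled out as a sketch (hypothetical
names, kernel-style but not the driver's actual code):

	#include <linux/kernel.h>
	#include <linux/dma-mapping.h>
	#include <linux/vmalloc.h>

	/* allocate 'total' bytes of DMA-coherent memory as 32KB chunks */
	static void **alloc_egr_chunks(struct device *dev, size_t total,
				       dma_addr_t **physp, unsigned *nchunksp)
	{
		const size_t chunk = 0x8000;	/* 32KB */
		gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
		unsigned n = ALIGN(total, chunk) / chunk, i;
		void **bufs = vmalloc(n * sizeof(*bufs));
		dma_addr_t *phys = vmalloc(n * sizeof(*phys));

		if (!bufs || !phys)
			goto fail;
		for (i = 0; i < n; i++) {
			bufs[i] = dma_alloc_coherent(dev, chunk, &phys[i],
						     gfp_flags);
			if (!bufs[i])
				goto fail_free;
		}
		*physp = phys;
		*nchunksp = n;
		return bufs;

	fail_free:
		while (i--)
			dma_free_coherent(dev, chunk, bufs[i], phys[i]);
	fail:
		vfree(bufs);	/* vfree(NULL) is a no-op */
		vfree(phys);
		return NULL;
	}
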
@@ -720,10 +731,8 @@
 	 * memory pressure (creating large files and then copying them over
 	 * NFS while doing lots of MPI jobs), we hit some allocation
 	 * failures, even though we can sleep...  (2.6.10) Still get
-	 * failures at 64K.  32K is the lowest we can go without waiting
-	 * more memory again.  It seems likely that the coalescing in
-	 * free_pages, etc. still has issues (as it has had previously
-	 * during 2.6.x development).
+	 * failures at 64K.  32K is the lowest we can go without wasting
+	 * additional memory.
 	 */
 	size = 0x8000;
 	alloced = ALIGN(egrsize * egrcnt, size);
@@ -744,12 +753,6 @@
 		goto bail_rcvegrbuf;
 	}
 	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-		/*
-		 * GFP_USER, but without GFP_FS, so buffer cache can be
-		 * coalesced (we hope); otherwise, even at order 4,
-		 * heavy filesystem activity makes these fail
-		 */
-		gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
 
 		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
 			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
@@ -783,11 +786,12 @@
 
 bail_rcvegrbuf_phys:
 	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-		     pd->port_rcvegrbuf[e]; e++)
+		     pd->port_rcvegrbuf[e]; e++) {
 		dma_free_coherent(&dd->pcidev->dev, size,
 				  pd->port_rcvegrbuf[e],
 				  pd->port_rcvegrbuf_phys[e]);
 
+	}
 	vfree(pd->port_rcvegrbuf_phys);
 	pd->port_rcvegrbuf_phys = NULL;
 bail_rcvegrbuf:
@@ -802,10 +806,7 @@
 {
 	int ret = 0;
 	struct ipath_devdata *dd = pd->port_dd;
-	u64 physaddr, uaddr, off, atmp;
-	struct page *pagep;
 	u32 head32;
-	u64 head;
 
 	/* for now, if major version is different, bail */
 	if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
@@ -830,54 +831,6 @@
 
 	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
 
-	/* set up for the rcvhdr Q tail register writeback to user memory */
-	if (!uinfo->spu_rcvhdraddr ||
-	    !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
-		       uinfo->spu_rcvhdraddr, sizeof(u64))) {
-		ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
-			  pd->port_port,
-			  (unsigned long long) uinfo->spu_rcvhdraddr);
-		ret = -EINVAL;
-		goto done;
-	}
-
-	off = offset_in_page(uinfo->spu_rcvhdraddr);
-	uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
-	ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
-	if (ret) {
-		dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
-			 "address %llx for rcvhdrtail: errno %d\n",
-			 (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
-		goto done;
-	}
-	ipath_stats.sps_pagelocks++;
-	pd->port_rcvhdrtail_uaddr = uaddr;
-	pd->port_rcvhdrtail_pagep = pagep;
-	pd->port_rcvhdrtail_kvaddr =
-		page_address(pagep);
-	pd->port_rcvhdrtail_kvaddr += off;
-	physaddr = page_to_phys(pagep) + off;
-	ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
-		   "physical (off=%llx)\n",
-		   pd->port_port,
-		   (unsigned long long) uinfo->spu_rcvhdraddr,
-		   (unsigned long long) physaddr, (unsigned long long) off);
-	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-			      pd->port_port, physaddr);
-	atmp = ipath_read_kreg64_port(dd,
-				      dd->ipath_kregs->kr_rcvhdrtailaddr,
-				      pd->port_port);
-	if (physaddr != atmp) {
-		ipath_dev_err(dd,
-			      "Catastrophic software error, "
-			      "RcvHdrTailAddr%u written as %llx, "
-			      "read back as %llx\n", pd->port_port,
-			      (unsigned long long) physaddr,
-			      (unsigned long long) atmp);
-		ret = -EINVAL;
-		goto done;
-	}
-
 	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
 	pd->port_piobufs = dd->ipath_piobufbase +
 		dd->ipath_pbufsport * (pd->port_port -
@@ -896,26 +849,18 @@
 		ret = ipath_create_user_egr(pd);
 	if (ret)
 		goto done;
-	/* enable receives now */
-	/* atomically set enable bit for this port */
-	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
-		&dd->ipath_rcvctrl);
 
 	/*
-	 * set the head registers for this port to the current values
+	 * set the eager head register for this port to the current values
 	 * of the tail pointers, since we don't know if they were
 	 * updated on last use of the port.
 	 */
-	head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-	head = (u64) head32;
-	ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
 	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
 	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
 	dd->ipath_lastegrheads[pd->port_port] = -1;
 	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
-	ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
-		   "tail regs\n", pd->port_port,
-		   (unsigned long long) head, head32);
+	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
+		   pd->port_port, head32);
 	pd->port_tidcursor = 0;	/* start at beginning after open */
 	/*
 	 * now enable the port; the tail registers will be written to memory
@@ -924,24 +869,76 @@
 	 * transition from 0 to 1, so clear it first, then set it as part of
 	 * enabling the port.  This will (very briefly) affect any other
 	 * open ports, but it shouldn't be long enough to be an issue.
+	 * We explicitly set the in-memory copy to 0 beforehand, so we don't
+	 * have to wait to be sure the DMA update has happened.
 	 */
+	*pd->port_rcvhdrtail_kvaddr = 0ULL;
+	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
+		&dd->ipath_rcvctrl);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
-
 done:
 	return ret;
 }
 
+
+/* common code for mappings of dma_alloc_coherent memory */
+static int ipath_mmap_mem(struct vm_area_struct *vma,
+			     struct ipath_portdata *pd, unsigned len,
+			     int write_ok, dma_addr_t addr, char *what)
+{
+	struct ipath_devdata *dd = pd->port_dd;
+	unsigned long pfn = (unsigned long)addr >> PAGE_SHIFT;
+	int ret;
+
+	if ((vma->vm_end - vma->vm_start) > len) {
+		dev_info(&dd->pcidev->dev,
+			 "FAIL on %s: len %lx > %x\n", what,
+			 vma->vm_end - vma->vm_start, len);
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (!write_ok) {
+		if (vma->vm_flags & VM_WRITE) {
+			dev_info(&dd->pcidev->dev,
+				 "%s must be mapped readonly\n", what);
+			ret = -EPERM;
+			goto bail;
+		}
+
+		/* don't allow them to later change with mprotect */
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn,
+			      len, vma->vm_page_prot);
+	if (ret)
+		dev_info(&dd->pcidev->dev,
+			 "%s port%u mmap of %lx, %x bytes r%c failed: %d\n",
+			 what, pd->port_port, (unsigned long)addr, len,
+			 write_ok ? 'w' : 'o', ret);
+	else
+		ipath_cdbg(VERBOSE, "%s port%u mmapped %lx, %x bytes r%c\n",
+			   what, pd->port_port, (unsigned long)addr, len,
+			   write_ok ? 'w' : 'o');
+bail:
+	return ret;
+}
+
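
The helper above is the usual coherent-buffer mmap pattern: length check,
optional write-protection (clearing VM_MAYWRITE so mprotect() can't later
re-enable writes), then remap_pfn_range().  A minimal standalone version of
the read-only case, with hypothetical arguments:

	#include <linux/mm.h>

	/* map 'len' bytes of a DMA buffer at physical address 'phys',
	 * read-only, into the vma a char device's mmap handler received */
	static int map_buf_readonly(struct vm_area_struct *vma,
				    dma_addr_t phys, size_t len)
	{
		if (vma->vm_end - vma->vm_start > len)
			return -EFAULT;		/* can't map past buffer */
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;		/* read-only object */
		vma->vm_flags &= ~VM_MAYWRITE;	/* block later mprotect */
		return remap_pfn_range(vma, vma->vm_start,
				       (unsigned long)(phys >> PAGE_SHIFT),
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
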
 static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
 		     u64 ureg)
 {
 	unsigned long phys;
 	int ret;
 
-	/* it's the real hardware, so io_remap works */
-
+	/*
+	 * This is real hardware, so use io_remap.  This is the mechanism
+	 * for the user process to update the head registers for their port
+	 * in the chip.
+	 */
 	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
 		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
 			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
@@ -967,10 +964,11 @@
 	int ret;
 
 	/*
-	 * When we map the PIO buffers, we want to map them as writeonly, no
-	 * read possible.
+	 * When we map the PIO buffers in the chip, we want to map them as
+	 * writeonly, with no reads possible.  This prevents access to
+	 * previous process data, and catches users who might try to read
+	 * the I/O space due to a bug.
 	 */
-
 	if ((vma->vm_end - vma->vm_start) >
 	    (dd->ipath_pbufsport * dd->ipath_palign)) {
 		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
@@ -981,11 +979,10 @@
 	}
 
 	phys = dd->ipath_physaddr + pd->port_piobufs;
+
 	/*
-	 * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
+	 * Don't mark this as non-cached, or we don't get the
 	 * write combining behavior we want on the PIO buffers!
-	 * vma->vm_page_prot =
-	 *        pgprot_noncached(vma->vm_page_prot);
 	 */
 
 	if (vma->vm_flags & VM_READ) {
@@ -997,8 +994,7 @@
 	}
 
 	/* don't allow them to later change to readable with mprotect */
-
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vma->vm_flags &= ~VM_MAYREAD;
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
 	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
@@ -1017,11 +1013,6 @@
 	dma_addr_t *phys;
 	int ret;
 
-	if (!pd->port_rcvegrbuf) {
-		ret = -EFAULT;
-		goto bail;
-	}
-
 	size = pd->port_rcvegrbuf_size;
 	total_size = pd->port_rcvegrbuf_chunks * size;
 	if ((vma->vm_end - vma->vm_start) > total_size) {
@@ -1039,13 +1030,12 @@
 		ret = -EPERM;
 		goto bail;
 	}
+	/* don't allow them to later change to writeable with mprotect */
+	vma->vm_flags &= ~VM_MAYWRITE;
 
 	start = vma->vm_start;
 	phys = pd->port_rcvegrbuf_phys;
 
-	/* don't allow them to later change to writeable with mprotect */
-	vma->vm_flags &= ~VM_MAYWRITE;
-
 	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
 		ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
 				      size, vma->vm_page_prot);
@@ -1058,78 +1048,6 @@
 	return ret;
 }
 
-static int mmap_rcvhdrq(struct vm_area_struct *vma,
-			struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	size_t total_size;
-	int ret;
-
-	/*
-	 * kmalloc'ed memory, physically contiguous; this is from
-	 * spi_rcvhdr_base; we allow user to map read-write so they can
-	 * write hdrq entries to allow protocol code to directly poll
-	 * whether a hdrq entry has been written.
-	 */
-	total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-			   sizeof(u32), PAGE_SIZE);
-	if ((vma->vm_end - vma->vm_start) > total_size) {
-		dev_info(&dd->pcidev->dev,
-			 "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
-			 vma->vm_end - vma->vm_start,
-			 (unsigned long) total_size);
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	ret = remap_pfn_range(vma, vma->vm_start,
-			      pd->port_rcvhdrq_phys >> PAGE_SHIFT,
-			      vma->vm_end - vma->vm_start,
-			      vma->vm_page_prot);
-bail:
-	return ret;
-}
-
-static int mmap_pioavailregs(struct vm_area_struct *vma,
-			     struct ipath_portdata *pd)
-{
-	struct ipath_devdata *dd = pd->port_dd;
-	int ret;
-
-	/*
-	 * when we map the PIO bufferavail registers, we want to map them as
-	 * readonly, no write possible.
-	 *
-	 * kmalloc'ed memory, physically contiguous, one page only, readonly
-	 */
-
-	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-		dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
-			 "reqlen %lx > actual %lx\n",
-			 vma->vm_end - vma->vm_start,
-			 (unsigned long) PAGE_SIZE);
-		ret = -EFAULT;
-		goto bail;
-	}
-
-	if (vma->vm_flags & VM_WRITE) {
-		dev_info(&dd->pcidev->dev,
-			 "Can't map pioavailregs as writable (flags=%lx)\n",
-			 vma->vm_flags);
-		ret = -EPERM;
-		goto bail;
-	}
-
-	/* don't allow them to later change with mprotect */
-	vma->vm_flags &= ~VM_MAYWRITE;
-
-	ret = remap_pfn_range(vma, vma->vm_start,
-			      dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
-			      PAGE_SIZE, vma->vm_page_prot);
-bail:
-	return ret;
-}
-
 /**
  * ipath_mmap - mmap various structures into user space
  * @fp: the file pointer
@@ -1149,6 +1067,7 @@
 
 	pd = port_fp(fp);
 	dd = pd->port_dd;
+
 	/*
 	 * This is the ipath_do_user_init() code, mapping the shared buffers
 	 * into the user process. The address referred to by vm_pgoff is the
@@ -1158,28 +1077,59 @@
 	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
 
 	/*
-	 * note that ureg does *NOT* have the kregvirt as part of it, to be
-	 * sure that for 32 bit programs, we don't end up trying to map a >
-	 * 44 address.  Has to match ipath_get_base_info() code that sets
-	 * __spi_uregbase
+	 * Must fit in 40 bits for our hardware; some of this is checked
+	 * elsewhere, but we'll be paranoid.  The check for 0 is mostly in
+	 * case one of the allocations failed, but the user called mmap
+	 * anyway.  We want to catch that before it can match.
 	 */
+	if (!pgaddr || pgaddr >= (1ULL << 40)) {
+		ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n",
+			(unsigned long long)pgaddr, vma->vm_start, vma->vm_end);
+		return -EINVAL;
+	}
 
+	/* just the offset of the port user registers, not physical addr */
 	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
 
 	ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
 		   (unsigned long long) pgaddr, vma->vm_start,
 		   vma->vm_end - vma->vm_start);
 
-	if (pgaddr == ureg)
+	if (vma->vm_start & (PAGE_SIZE - 1)) {
+		ipath_dev_err(dd,
+			"vm_start not aligned: %lx, end=%lx phys %lx\n",
+			vma->vm_start, vma->vm_end, (unsigned long)pgaddr);
+		ret = -EINVAL;
+	} else if (pgaddr == ureg)
 		ret = mmap_ureg(vma, dd, ureg);
 	else if (pgaddr == pd->port_piobufs)
 		ret = mmap_piobufs(vma, dd, pd);
 	else if (pgaddr == (u64) pd->port_rcvegr_phys)
 		ret = mmap_rcvegrbufs(vma, pd);
-	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-		ret = mmap_rcvhdrq(vma, pd);
+	else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
+		/*
+		 * The rcvhdrq itself; readonly except on HT-400 (so have
+		 * to allow writable mapping), multiple pages, contiguous
+		 * from an i/o perspective.
+		 */
+		unsigned total_size =
+			ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize
+			      * sizeof(u32), PAGE_SIZE);
+		ret = ipath_mmap_mem(vma, pd, total_size, 1,
+				     pd->port_rcvhdrq_phys,
+				     "rcvhdrq");
+	} else if (pgaddr == (u64)pd->port_rcvhdrqtailaddr_phys)
+		/* in-memory copy of rcvhdrq tail register */
+		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+				     pd->port_rcvhdrqtailaddr_phys,
+				     "rcvhdrq tail");
 	else if (pgaddr == dd->ipath_pioavailregs_phys)
-		ret = mmap_pioavailregs(vma, pd);
+		/* in-memory copy of pioavail registers */
+		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
+				     dd->ipath_pioavailregs_phys,
+				     "pioavail registers");
 	else
 		ret = -EINVAL;
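
From user space, the object to map is selected entirely by the mmap offset:
the process passes back one of the physical addresses the driver exported in
its base info.  A sketch of the user-side call for the new tail page, using
the spi_rcvhdr_tailaddr field filled in earlier in this patch:

	#include <sys/mman.h>

	/* map the in-memory rcvhdrq tail page read-only */
	static void *map_hdrq_tail(int fd, unsigned long long tailaddr,
				   size_t pagesize)
	{
		return mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd,
			    (off_t)tailaddr);
	}
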
 
@@ -1442,16 +1392,16 @@
 
 static int ipath_open(struct inode *in, struct file *fp)
 {
-	int ret, minor;
+	int ret, user_minor;
 
 	mutex_lock(&ipath_mutex);
 
-	minor = iminor(in);
+	user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
 	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-		   (long)in->i_rdev, minor);
+		   (long)in->i_rdev, user_minor);
 
-	if (minor)
-		ret = find_free_port(minor - 1, fp);
+	if (user_minor)
+		ret = find_free_port(user_minor - 1, fp);
 	else
 		ret = find_best_unit(fp);
 
@@ -1536,53 +1486,54 @@
 	}
 
 	if (dd->ipath_kregbase) {
-		if (pd->port_rcvhdrtail_uaddr) {
-			pd->port_rcvhdrtail_uaddr = 0;
-			pd->port_rcvhdrtail_kvaddr = NULL;
-			ipath_release_user_pages_on_close(
-				&pd->port_rcvhdrtail_pagep, 1);
-			pd->port_rcvhdrtail_pagep = NULL;
-			ipath_stats.sps_pageunlocks++;
-		}
-		ipath_write_kreg_port(
-			dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-			port, 0ULL);
-		ipath_write_kreg_port(
-			dd, dd->ipath_kregs->kr_rcvhdraddr,
-			pd->port_port, 0);
+		int i;
+		/* atomically clear receive enable port. */
+		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
+			  &dd->ipath_rcvctrl);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+				 dd->ipath_rcvctrl);
+		/* and read back from chip to be sure that nothing
+		 * else is in flight when we do the rest */
+		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 
 		/* clean up the pkeys for this port user */
 		ipath_clean_part_key(pd, dd);
 
-		if (port < dd->ipath_cfgports) {
-			int i = dd->ipath_pbufsport * (port - 1);
-			ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
 
-			/* atomically clear receive enable port. */
-			clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
-				  &dd->ipath_rcvctrl);
-			ipath_write_kreg(
-				dd,
-				dd->ipath_kregs->kr_rcvctrl,
-				dd->ipath_rcvctrl);
+		/*
+		 * Be paranoid, and never write 0's to these; just use an
+		 * unused part of the port 0 tail page.  Of course,
+		 * rcvhdraddr points to a large chunk of memory, so this
+		 * could still trash things, but at least it won't trash
+		 * page 0, and by disabling the port, it should stop "soon",
+		 * even if a packet or two is already in flight after we
+		 * disabled the port.
+		 */
+		ipath_write_kreg_port(dd,
+			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
+			dd->ipath_dummy_hdrq_phys);
+		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
+			pd->port_port, dd->ipath_dummy_hdrq_phys);
 
-			if (dd->ipath_pageshadow)
-				unlock_expected_tids(pd);
-			ipath_stats.sps_ports--;
-			ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-				   pd->port_comm, pd->port_pid,
-				   dd->ipath_unit, port);
-		}
+		i = dd->ipath_pbufsport * (port - 1);
+		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
+
+		if (dd->ipath_pageshadow)
+			unlock_expected_tids(pd);
+		ipath_stats.sps_ports--;
+		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
+			   pd->port_comm, pd->port_pid,
+			   dd->ipath_unit, port);
+
+		dd->ipath_f_clear_tids(dd, pd->port_port);
 	}
 
 	pd->port_cnt = 0;
 	pd->port_pid = 0;
 
-	dd->ipath_f_clear_tids(dd, pd->port_port);
-
-	ipath_free_pddata(dd, pd->port_port, 0);
-
+	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
 	mutex_unlock(&ipath_mutex);
+	ipath_free_pddata(dd, pd); /* after releasing the mutex */
 
 	return ret;
 }
@@ -1859,19 +1810,12 @@
 				      "error %d\n", -ret);
 			goto bail;
 		}
-		ret = ipath_diag_init();
-		if (ret < 0) {
-			ipath_dev_err(dd, "Unable to set up diag support: "
-				      "error %d\n", -ret);
-			goto bail_sma;
-		}
-
 		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
 				&wildcard_class_dev);
 		if (ret < 0) {
 			ipath_dev_err(dd, "Could not create wildcard "
 				      "minor: error %d\n", -ret);
-			goto bail_diag;
+			goto bail_sma;
 		}
 
 		atomic_set(&user_setup, 1);
@@ -1880,31 +1824,28 @@
 	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
 
 	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-			&dd->cdev, &dd->class_dev);
+			&dd->user_cdev, &dd->user_class_dev);
 	if (ret < 0)
 		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
 			      dd->ipath_unit + 1, name);
 
 	goto bail;
 
-bail_diag:
-	ipath_diag_cleanup();
 bail_sma:
 	user_cleanup();
 bail:
 	return ret;
 }
 
-void ipath_user_del(struct ipath_devdata *dd)
+void ipath_user_remove(struct ipath_devdata *dd)
 {
-	cleanup_cdev(&dd->cdev, &dd->class_dev);
+	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);
 
 	if (atomic_dec_return(&user_count) == 0) {
 		if (atomic_read(&user_setup) == 0)
 			goto bail;
 
 		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
-		ipath_diag_cleanup();
 		user_cleanup();
 
 		atomic_set(&user_setup, 0);
@@ -1912,3 +1853,4 @@
 bail:
 	return;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 97f142c..0936d8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_ht400.c
index fac0a2b..3db015d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_ht400.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -1572,7 +1573,6 @@
 	dd->ipath_f_reset = ipath_setup_ht_reset;
 	dd->ipath_f_get_boardname = ipath_ht_boardname;
 	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-	dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
 	dd->ipath_f_early_init = ipath_ht_early_init;
 	dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
 	dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index dc83250..414cdd1 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -35,7 +36,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /*
  * min buffers we want to have per port, after driver
@@ -114,6 +115,7 @@
 				      "eager TID %u\n", e);
 			while (e != 0)
 				dev_kfree_skb(skbs[--e]);
+			vfree(skbs);
 			ret = -ENOMEM;
 			goto bail;
 		}
@@ -275,7 +277,7 @@
 	pd->port_port = 0;
 	pd->port_cnt = 1;
 	/* The port 0 pkey table is used by the layer interface. */
-	pd->port_pkeys[0] = IPS_DEFAULT_P_KEY;
+	pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
 	dd->ipath_rcvtidcnt =
 		ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
 	dd->ipath_rcvtidbase =
@@ -409,17 +411,8 @@
 	/* and its length */
 	dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
 
-	if (dd->ipath_unit * 64 > (IPATH_PORT0_RCVHDRTAIL_SIZE - 64)) {
-		ipath_dev_err(dd, "unit %u too large for port 0 "
-			      "rcvhdrtail buffer size\n", dd->ipath_unit);
-		ret = -ENODEV;
-	}
-	else
-		ret = 0;
+	ret = 0;
 
-	/* so we can get current tail in ipath_kreceive(), per chip */
-	dd->ipath_hdrqtailptr = &ipath_port0_rcvhdrtail[
-		dd->ipath_unit * (64 / sizeof(*ipath_port0_rcvhdrtail))];
 done:
 	return ret;
 }
@@ -652,8 +645,9 @@
 {
 	int ret = 0, i;
 	u32 val32, kpiobufs;
-	u64 val, atmp;
+	u64 val;
 	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
 
 	ret = init_housekeeping(dd, &pd, reinit);
 	if (ret)
@@ -775,24 +769,6 @@
 		goto done;
 	}
 
-	val = ipath_port0_rcvhdrtail_dma + dd->ipath_unit * 64;
-
-	/* verify that the alignment requirement was met */
-	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-			      0, val);
-	atmp = ipath_read_kreg64_port(
-		dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 0);
-	if (val != atmp) {
-		ipath_dev_err(dd, "Catastrophic software error, "
-			      "RcvHdrTailAddr0 written as %llx, "
-			      "read back as %llx from %x\n",
-			      (unsigned long long) val,
-			      (unsigned long long) atmp,
-			      dd->ipath_kregs->kr_rcvhdrtailaddr);
-		ret = -EINVAL;
-		goto done;
-	}
-
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
 
 	/*
@@ -836,25 +812,45 @@
 	/* clear any interrupts up to this point (ints still not enabled) */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
-	ipath_stats.sps_lid[dd->ipath_unit] = dd->ipath_lid;
-
 	/*
 	 * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
 	 * re-init, the simplest way to handle this is to free
 	 * existing, and re-allocate.
 	 */
-	if (reinit)
-		ipath_free_pddata(dd, 0, 0);
+	if (reinit) {
+		struct ipath_portdata *pd = dd->ipath_pd[0];
+		dd->ipath_pd[0] = NULL;
+		ipath_free_pddata(dd, pd);
+	}
 	dd->ipath_f_tidtemplate(dd);
 	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret)
+	if (!ret) {
+		dd->ipath_hdrqtailptr =
+			(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
 		ret = create_port0_egr(dd);
+	}
 	if (ret)
 		ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
 			      "rcvhdrq and/or egr bufs\n");
 	else
 		enable_chip(dd, pd, reinit);
 
+
+	if (!ret && !reinit) {
+		/* used when we close a port, for DMA already in flight at close */
+		dd->ipath_dummy_hdrq = dma_alloc_coherent(
+			&dd->pcidev->dev, pd->port_rcvhdrq_size,
+			&dd->ipath_dummy_hdrq_phys,
+			gfp_flags);
+		if (!dd->ipath_dummy_hdrq) {
+			dev_info(&dd->pcidev->dev,
+				"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
+				pd->port_rcvhdrq_size);
+			/* fall back to just 0'ing */
+			dd->ipath_dummy_hdrq_phys = 0UL;
+		}
+	}
+
 	/*
 	 * cause retrigger of pending interrupts ignored during init,
 	 * even if we had errors
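
The dummy hdrq allocated here exists solely so the close path (in
ipath_file_ops.c above) can retarget the chip's DMA at harmless memory
instead of address 0 while packets may still be in flight.  Condensed from
ipath_close() earlier in this patch (the function name is hypothetical):

	static void retarget_port_dma(struct ipath_devdata *dd, unsigned port)
	{
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
				      port, dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
				      port, dd->ipath_dummy_hdrq_phys);
	}
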
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5e31d0d..280e732 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -33,9 +34,10 @@
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
+/* These are all rcv-related errors which we want to count for stats */
 #define E_SUM_PKTERRS \
 	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
 	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
@@ -44,6 +46,7 @@
 	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
 	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
 
+/* These are all send-related errors which we want to count for stats */
 #define E_SUM_ERRS \
 	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
 	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
@@ -51,6 +54,18 @@
 	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
 	 INFINIPATH_E_INVALIDADDR)
 
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can be the result of the sender having the
+ * link change state, so that we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
+	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
+	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
+	 INFINIPATH_E_RUNEXPCHAR)
+
 static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	unsigned long sbuf[4];
@@ -100,9 +115,7 @@
 		if (ipath_debug & __IPATH_PKTDBG)
 			printk("\n");
 	}
-	if ((errs & (INFINIPATH_E_SDROPPEDDATAPKT |
-		     INFINIPATH_E_SDROPPEDSMPPKT |
-		     INFINIPATH_E_SMINPKTLEN)) &&
+	if ((errs & E_SUM_LINK_PKTERRS) &&
 	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
 		/*
 		 * This can happen when SMA is trying to bring the link
@@ -111,11 +124,9 @@
 		 * valid.  We don't want to confuse people, so we just
 		 * don't print them, except at debug
 		 */
-		ipath_dbg("Ignoring pktsend errors %llx, because not "
-			  "yet active\n", (unsigned long long) errs);
-		ignore_this_time = INFINIPATH_E_SDROPPEDDATAPKT |
-			INFINIPATH_E_SDROPPEDSMPPKT |
-			INFINIPATH_E_SMINPKTLEN;
+		ipath_dbg("Ignoring packet errors %llx, because link not "
+			  "ACTIVE\n", (unsigned long long) errs);
+		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
 	}
 
 	return ignore_this_time;
@@ -156,7 +167,29 @@
 	 */
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
 	lstate = val & IPATH_IBSTATE_MASK;
-	if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
+
+	/*
+	 * This is confusing enough when it happens that I want to always
+	 * put it on the console and in the logs.  If it was a requested
+	 * state change, we'll have already cleared the flags, so we won't
+	 * print this warning.
+	 */
+	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE) &&
+	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+		dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
+			 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
+			 ib_linkstate(lstate));
+		/*
+		 * Flush all queued sends when link went to DOWN or INIT,
+		 * to be sure that they don't block SMA and other MAD packets
+		 */
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+				 INFINIPATH_S_ABORT);
+		ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
+				     (unsigned)(dd->ipath_piobcnt2k +
+						dd->ipath_piobcnt4k) -
+				     dd->ipath_lastport_piobuf);
+	} else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
 	    lstate == IPATH_IBSTATE_ACTIVE) {
 		/*
 		 * only print at SMA if there is a change, debug if not
@@ -229,6 +262,7 @@
 				     | IPATH_LINKACTIVE |
 				     IPATH_LINKARMED);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+		dd->ipath_lli_counter = 0;
 		if (!noprint) {
 			if (((dd->ipath_lastibcstat >>
 			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
@@ -350,7 +384,7 @@
 	return supp_msgs;
 }
 
-static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[512];
 	u64 ignore_this_time = 0;
@@ -379,6 +413,19 @@
 
 	if (errs & E_SUM_ERRS)
 		ignore_this_time = handle_e_sum_errs(dd, errs);
+	else if ((errs & E_SUM_LINK_PKTERRS) &&
+	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+		/*
+		 * This can happen when SMA is trying to bring the link
+		 * up, but the IB link changes state at the "wrong" time.
+		 * The IB logic then complains that the packet isn't
+		 * valid.  We don't want to confuse people, so we just
+		 * don't print them, except at debug
+		 */
+		ipath_dbg("Ignoring packet errors %llx, because link not "
+			  "ACTIVE\n", (unsigned long long) errs);
+		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+	}
 
 	if (supp_msgs == 250000) {
 		/*
@@ -434,7 +481,7 @@
 			  INFINIPATH_E_IBSTATUSCHANGED);
 	}
 	if (!errs)
-		return;
+		return 0;
 
 	if (!noprint)
 		/*
@@ -493,10 +540,10 @@
 				continue;
 			if (hd == (tl + 1) ||
 			    (!hd && tl == dd->ipath_hdrqlast)) {
-				dd->ipath_lastrcvhdrqtails[i] = tl;
-				pd->port_hdrqfull++;
 				if (i == 0)
 					chkerrpkts = 1;
+				dd->ipath_lastrcvhdrqtails[i] = tl;
+				pd->port_hdrqfull++;
 			}
 		}
 	}
@@ -558,9 +605,7 @@
 		wake_up_interruptible(&ipath_sma_state_wait);
 	}
 
-	if (chkerrpkts)
-		/* process possible error packets in hdrq */
-		ipath_kreceive(dd);
+	return chkerrpkts;
 }
 
 /* this is separate to allow for better optimization of ipath_intr() */
@@ -678,7 +723,12 @@
 			 dd->ipath_sendctrl);
 }
 
-static void handle_rcv(struct ipath_devdata *dd, u32 istat)
+/*
+ * Handle receive interrupts for user ports; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 {
 	u64 portr;
 	int i;
@@ -688,22 +738,17 @@
 		 infinipath_i_rcvavail_mask)
 		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
 		   infinipath_i_rcvurg_mask);
-	for (i = 0; i < dd->ipath_cfgports; i++) {
+	for (i = 1; i < dd->ipath_cfgports; i++) {
 		struct ipath_portdata *pd = dd->ipath_pd[i];
-		if (portr & (1 << i) && pd &&
-		    pd->port_cnt) {
-			if (i == 0)
-				ipath_kreceive(dd);
-			else if (test_bit(IPATH_PORT_WAITING_RCV,
-					  &pd->port_flag)) {
-				int rcbit;
-				clear_bit(IPATH_PORT_WAITING_RCV,
-					  &pd->port_flag);
-				rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
-				clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
-				wake_up_interruptible(&pd->port_wait);
-				rcvdint = 1;
-			}
+		if (portr & (1 << i) && pd && pd->port_cnt &&
+			test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
+			int rcbit;
+			clear_bit(IPATH_PORT_WAITING_RCV,
+				  &pd->port_flag);
+			rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
+			clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
+			wake_up_interruptible(&pd->port_wait);
+			rcvdint = 1;
 		}
 	}
 	if (rcvdint) {
@@ -719,16 +764,19 @@
 irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat;
+	u32 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
-	static unsigned unexpected = 0;
 	irqreturn_t ret;
+	u32 oldhead, curtail;
+	static unsigned unexpected = 0;
+	static const u32 port0rbits = (1U << INFINIPATH_I_RCVAVAIL_SHIFT) |
+		(1U << INFINIPATH_I_RCVURG_SHIFT);
 
-	if(!(dd->ipath_flags & IPATH_PRESENT)) {
-		/* this is mostly so we don't try to touch the chip while
-		 * it is being reset */
+	ipath_stats.sps_ints++;
+
+	if (!(dd->ipath_flags & IPATH_PRESENT)) {
 		/*
-		 * This return value is perhaps odd, but we do not want the
+		 * This return value is not great, but we do not want the
 		 * interrupt core code to remove our interrupt handler
 		 * because we don't appear to be handling an interrupt
 		 * during a chip reset.
@@ -736,21 +784,6 @@
 		return IRQ_HANDLED;
 	}
 
-	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
-	if (unlikely(!istat)) {
-		ipath_stats.sps_nullintr++;
-		ret = IRQ_NONE; /* not our interrupt, or already handled */
-		goto bail;
-	}
-	if (unlikely(istat == -1)) {
-		ipath_bad_regread(dd);
-		/* don't know if it was our interrupt or not */
-		ret = IRQ_NONE;
-		goto bail;
-	}
-
-	ipath_stats.sps_ints++;
-
 	/*
 	 * this needs to be flags&initted, not statusp, so we keep
 	 * taking interrupts even after link goes down, etc.
@@ -763,17 +796,62 @@
 		ret = IRQ_NONE;
 		goto bail;
 	}
+
+	/*
+	 * We try to avoid reading the interrupt status register, since
+	 * that's a PIO read, and stalls the processor for up to
+	 * ~0.25 usec.  The idea is that if we processed a port 0 packet,
+	 * we blindly clear the port 0 receive interrupt bits, and nothing
+	 * else, then return.  If other interrupts are pending, the chip
+	 * will re-interrupt us as soon as we write the intclear register.
+	 * We then won't process any more kernel packets (if not the 2nd
+	 * time, then the 3rd or 4th) and we'll then handle the other
+	 * interrupts.  We clear the interrupts first so that we don't
+	 * lose interrupts for later packets that arrive while we are
+	 * processing.
+	 */
+	oldhead = dd->ipath_port0head;
+	curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+	if (oldhead != curtail) {
+		if (dd->ipath_flags & IPATH_GPIO_INTR) {
+			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
+					 (u64) (1 << 2));
+			istat = port0rbits | INFINIPATH_I_GPIO;
+		} else
+			istat = port0rbits;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
+		ipath_kreceive(dd);
+		if (oldhead != dd->ipath_port0head) {
+			ipath_stats.sps_fastrcvint++;
+			goto done;
+		}
+	}
+
+	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+
+	if (unlikely(!istat)) {
+		ipath_stats.sps_nullintr++;
+		ret = IRQ_NONE; /* not our interrupt, or already handled */
+		goto bail;
+	}
+	if (unlikely(istat == -1)) {
+		ipath_bad_regread(dd);
+		/* don't know if it was our interrupt or not */
+		ret = IRQ_NONE;
+		goto bail;
+	}
+
 	if (unexpected)
 		unexpected = 0;
 
-	ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
-
-	if (istat & ~infinipath_i_bitsextant)
+	if (unlikely(istat & ~infinipath_i_bitsextant))
 		ipath_dev_err(dd,
 			      "interrupt with unknown interrupts %x set\n",
 			      istat & (u32) ~ infinipath_i_bitsextant);
+	else
+		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
 
-	if (istat & INFINIPATH_I_ERROR) {
+	if (unlikely(istat & INFINIPATH_I_ERROR)) {
 		ipath_stats.sps_errints++;
 		estat = ipath_read_kreg64(dd,
 					  dd->ipath_kregs->kr_errorstatus);
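
The resulting fast path is worth seeing straight-line: check the in-memory
tail first, blindly clear only the port 0 receive bits, and fall back to
the slow PIO intstatus read only when that doesn't cover the interrupt.
Condensed from ipath_intr() above (the GPIO-workaround clear is omitted,
and the function name is hypothetical); returns nonzero when the fast path
fully handled the interrupt:

	static int port0_fastpath(struct ipath_devdata *dd, u32 port0rbits)
	{
		u32 oldhead = dd->ipath_port0head;
		u32 curtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);

		if (oldhead == curtail)
			return 0;	/* nothing visible for port 0 */
		/* clear first, so a packet arriving mid-processing
		 * re-interrupts rather than being lost */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
				 port0rbits);
		ipath_kreceive(dd);
		return dd->ipath_port0head != oldhead;
	}
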
@@ -788,10 +866,18 @@
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			handle_errors(dd, estat);
+			if (handle_errors(dd, estat))
+				/* force calling ipath_kreceive() */
+				chk0rcv = 1;
 	}
 
 	if (istat & INFINIPATH_I_GPIO) {
+		/*
+		 * Packets are available in the port 0 rcv queue.
+		 * Eventually this needs to be generalized to check
+		 * IPATH_GPIO_INTR, and the specific GPIO bit, if
+		 * GPIO interrupts are used for anything else.
+		 */
 		if (unlikely(!(dd->ipath_flags & IPATH_GPIO_INTR))) {
 			u32 gpiostatus;
 			gpiostatus = ipath_read_kreg32(
@@ -804,27 +890,39 @@
 		else {
 			/* Clear GPIO status bit 2 */
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-					 (u64) (1 << 2));
-
-			/*
-			 * Packets are available in the port 0 rcv queue.
-			 * Eventually this needs to be generalized to check
-			 * IPATH_GPIO_INTR, and the specific GPIO bit, if
-			 * GPIO interrupts are used for anything else.
-			 */
-			ipath_kreceive(dd);
+					(u64) (1 << 2));
+			chk0rcv = 1;
 		}
 	}
+	chk0rcv |= istat & port0rbits;
 
 	/*
-	 * clear the ones we will deal with on this round
-	 * We clear it early, mostly for receive interrupts, so we
-	 * know the chip will have seen this by the time we process
-	 * the queue, and will re-interrupt if necessary.  The processor
-	 * itself won't take the interrupt again until we return.
+	 * Clear the interrupt bits we found set, unless they are receive
+	 * related, in which case we already cleared them above, and don't
+	 * want to clear them again, because we might lose an interrupt.
+	 * Clear it early, so we "know" the chip will have seen this by
+	 * the time we process the queue, and will re-interrupt if necessary.
+	 * The processor itself won't take the interrupt again until we return.
 	 */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
+	/*
+	 * Handle port 0 receive before checking for pio buffers available,
+	 * since receives can overflow; piobuf waiters can afford a few
+	 * extra cycles, since they were waiting anyway, and users waiting
+	 * for a receive are handled at the bottom.
+	 */
+	if (chk0rcv) {
+		ipath_kreceive(dd);
+		istat &= ~port0rbits;
+	}
+
+	if (istat & ((infinipath_i_rcvavail_mask <<
+		      INFINIPATH_I_RCVAVAIL_SHIFT)
+		     | (infinipath_i_rcvurg_mask <<
+			INFINIPATH_I_RCVURG_SHIFT)))
+		handle_urcv(dd, istat);
+
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
 		clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
@@ -836,17 +934,7 @@
 		handle_layer_pioavail(dd);
 	}
 
-	/*
-	 * we check for both transition from empty to non-empty, and urgent
-	 * packets (those with the interrupt bit set in the header)
-	 */
-
-	if (istat & ((infinipath_i_rcvavail_mask <<
-		      INFINIPATH_I_RCVAVAIL_SHIFT)
-		     | (infinipath_i_rcvurg_mask <<
-			INFINIPATH_I_RCVURG_SHIFT)))
-		handle_rcv(dd, istat);
-
+done:
 	ret = IRQ_HANDLED;
 
 bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 5d92d57..e9f374f 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,6 +1,7 @@
 #ifndef _IPATH_KERNEL_H
 #define _IPATH_KERNEL_H
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -61,9 +62,7 @@
 	/* rcvhdrq base, needs mmap before useful */
 	void *port_rcvhdrq;
 	/* kernel virtual address where hdrqtail is updated */
-	u64 *port_rcvhdrtail_kvaddr;
-	/* page * used for uaddr */
-	struct page *port_rcvhdrtail_pagep;
+	volatile __le64 *port_rcvhdrtail_kvaddr;
 	/*
 	 * temp buffer for expected send setup, allocated at open, instead
 	 * of each setup call
@@ -78,11 +77,7 @@
 	dma_addr_t port_rcvegr_phys;
 	/* mmap of hdrq, must fit in 44 bits */
 	dma_addr_t port_rcvhdrq_phys;
-	/*
-	 * the actual user address that we ipath_mlock'ed, so we can
-	 * ipath_munlock it at close
-	 */
-	unsigned long port_rcvhdrtail_uaddr;
+	dma_addr_t port_rcvhdrqtailaddr_phys;
 	/*
 	 * number of opens on this instance (0 or 1; ignoring forks, dup,
 	 * etc. for now)
@@ -158,16 +153,10 @@
 	/* base of memory alloced for ipath_kregbase, for free */
 	u64 *ipath_kregalloc;
 	/*
-	 * version of kregbase that doesn't have high bits set (for 32 bit
-	 * programs, so mmap64 44 bit works)
-	 */
-	u64 __iomem *ipath_kregvirt;
-	/*
 	 * virtual address where port0 rcvhdrqtail updated for this unit.
 	 * only written to by the chip, not the driver.
 	 */
 	volatile __le64 *ipath_hdrqtailptr;
-	dma_addr_t ipath_dma_addr;
 	/* ipath_cfgports pointers */
 	struct ipath_portdata **ipath_pd;
 	/* sk_buffs used by port 0 eager receive queue */
@@ -354,13 +343,17 @@
 	char *ipath_freezemsg;
 	/* pci access data structure */
 	struct pci_dev *pcidev;
-	struct cdev *cdev;
-	struct class_device *class_dev;
+	struct cdev *user_cdev;
+	struct cdev *diag_cdev;
+	struct class_device *user_class_dev;
+	struct class_device *diag_class_dev;
 	/* timer used to prevent stats overflow, error throttling, etc. */
 	struct timer_list ipath_stats_timer;
 	/* check for stale messages in rcv queue */
 	/* only allow one intr at a time. */
 	unsigned long ipath_rcv_pending;
+	void *ipath_dummy_hdrq;	/* used after port close */
+	dma_addr_t ipath_dummy_hdrq_phys;
 
 	/*
 	 * Shadow copies of registers; size indicates read access size.
@@ -500,8 +493,11 @@
 	u16 ipath_lid;
 	/* list of pkeys programmed; 0 if not set */
 	u16 ipath_pkeys[4];
-	/* ASCII serial number, from flash */
-	u8 ipath_serial[12];
+	/*
+	 * ASCII serial number, from flash, large enough for the original
+	 * all-digit strings and the longer QLogic serial number format
+	 */
+	u8 ipath_serial[16];
 	/* human readable board version */
 	u8 ipath_boardversion[80];
 	/* chip major rev, from ipath_revision */
@@ -516,13 +512,13 @@
 	u8 ipath_pci_cacheline;
 	/* LID mask control */
 	u8 ipath_lmc;
+
+	/* local link integrity counter */
+	u32 ipath_lli_counter;
+	/* local link integrity errors */
+	u32 ipath_lli_errors;
 };
 
-extern volatile __le64 *ipath_port0_rcvhdrtail;
-extern dma_addr_t ipath_port0_rcvhdrtail_dma;
-
-#define IPATH_PORT0_RCVHDRTAIL_SIZE PAGE_SIZE
-
 extern struct list_head ipath_dev_list;
 extern spinlock_t ipath_devs_lock;
 extern struct ipath_devdata *ipath_lookup(int unit);
@@ -537,7 +533,7 @@
 extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
 
 void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_del(struct ipath_devdata *);
+void ipath_layer_remove(struct ipath_devdata *);
 
 int ipath_init_chip(struct ipath_devdata *, int);
 int ipath_enable_wc(struct ipath_devdata *dd);
@@ -551,14 +547,14 @@
 void ipath_cdev_cleanup(struct cdev **cdevp,
 			struct class_device **class_devp);
 
-int ipath_diag_init(void);
-void ipath_diag_cleanup(void);
+int ipath_diag_add(struct ipath_devdata *);
+void ipath_diag_remove(struct ipath_devdata *);
 void ipath_diag_bringup_link(struct ipath_devdata *);
 
 extern wait_queue_head_t ipath_sma_state_wait;
 
 int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_del(struct ipath_devdata *dd);
+void ipath_user_remove(struct ipath_devdata *dd);
 
 struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
 
@@ -582,7 +578,7 @@
 			  unsigned cnt);
 
 int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, u32, int);
+void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
 
 int ipath_parse_ushort(const char *str, unsigned short *valp);
 
@@ -720,13 +716,8 @@
  * @port: port number
  *
  * Return the contents of a register that is virtualized to be per port.
- * Prints a debug message and returns -1 on errors (not distinguishable from
- * valid contents at runtime; we may add a separate error variable at some
- * point).
- *
- * This is normally not used by the kernel, but may be for debugging, and
- * has a different implementation than user mode, which is why it's not in
- * _common.h.
+ * Returns -1 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
  */
 static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
 				    ipath_ureg regno, int port)
@@ -842,9 +833,10 @@
 
 #define IPATH_DRV_NAME		"ipath_core"
 #define IPATH_MAJOR		233
+#define IPATH_USER_MINOR_BASE	0
 #define IPATH_SMA_MINOR		128
-#define IPATH_DIAG_MINOR	129
-#define IPATH_NMINORS		130
+#define IPATH_DIAG_MINOR_BASE	129
+#define IPATH_NMINORS		255
 
 #define ipath_dev_err(dd,fmt,...) \
 	do { \
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 5ae8761..46773c6 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -120,6 +121,7 @@
 		  struct ib_sge *sge, int acc)
 {
 	struct ipath_mregion *mr;
+	unsigned n, m;
 	size_t off;
 	int ret;
 
@@ -151,20 +153,22 @@
 	}
 
 	off += mr->offset;
-	isge->mr = mr;
-	isge->m = 0;
-	isge->n = 0;
-	while (off >= mr->map[isge->m]->segs[isge->n].length) {
-		off -= mr->map[isge->m]->segs[isge->n].length;
-		isge->n++;
-		if (isge->n >= IPATH_SEGSZ) {
-			isge->m++;
-			isge->n = 0;
+	m = 0;
+	n = 0;
+	while (off >= mr->map[m]->segs[n].length) {
+		off -= mr->map[m]->segs[n].length;
+		n++;
+		if (n >= IPATH_SEGSZ) {
+			m++;
+			n = 0;
 		}
 	}
-	isge->vaddr = mr->map[isge->m]->segs[isge->n].vaddr + off;
-	isge->length = mr->map[isge->m]->segs[isge->n].length - off;
+	isge->mr = mr;
+	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+	isge->length = mr->map[m]->segs[n].length - off;
 	isge->sge_length = sge->length;
+	isge->m = m;
+	isge->n = n;
 
 	ret = 1;
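
The rewritten walk above advances a (chunk, index) pair through fixed-size
rows of segments until the offset lands inside one; the same search as a
standalone sketch, with a hypothetical segment array:

	#include <stddef.h>

	struct seg {
		void *vaddr;
		size_t length;
	};

	/* rows of 'segsz' segments; assumes 'off' is within total length */
	static void find_seg(struct seg **map, unsigned segsz, size_t off,
			     unsigned *mp, unsigned *np, size_t *offp)
	{
		unsigned m = 0, n = 0;

		while (off >= map[m][n].length) {
			off -= map[m][n].length;
			if (++n >= segsz) {
				m++;
				n = 0;
			}
		}
		*mp = m;
		*np = n;
		*offp = off;	/* remaining offset within the segment */
	}
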
 
@@ -189,6 +193,7 @@
 	struct ipath_lkey_table *rkt = &dev->lk_table;
 	struct ipath_sge *sge = &ss->sge;
 	struct ipath_mregion *mr;
+	unsigned n, m;
 	size_t off;
 	int ret;
 
@@ -206,20 +211,22 @@
 	}
 
 	off += mr->offset;
-	sge->mr = mr;
-	sge->m = 0;
-	sge->n = 0;
-	while (off >= mr->map[sge->m]->segs[sge->n].length) {
-		off -= mr->map[sge->m]->segs[sge->n].length;
-		sge->n++;
-		if (sge->n >= IPATH_SEGSZ) {
-			sge->m++;
-			sge->n = 0;
+	m = 0;
+	n = 0;
+	while (off >= mr->map[m]->segs[n].length) {
+		off -= mr->map[m]->segs[n].length;
+		n++;
+		if (n >= IPATH_SEGSZ) {
+			m++;
+			n = 0;
 		}
 	}
-	sge->vaddr = mr->map[sge->m]->segs[sge->n].vaddr + off;
-	sge->length = mr->map[sge->m]->segs[sge->n].length - off;
+	sge->mr = mr;
+	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+	sge->length = mr->map[m]->segs[n].length - off;
 	sge->sge_length = len;
+	sge->m = m;
+	sge->n = n;
 	ss->sg_list = NULL;
 	ss->num_sge = 1;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 9ec4ac7..b28c6f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -40,8 +41,8 @@
 #include <asm/byteorder.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 /* Acquire before ipath_devs_lock. */
 static DEFINE_MUTEX(ipath_layer_mutex);
@@ -299,9 +300,8 @@
 
 EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
 
-int ipath_set_sps_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
 {
-	ipath_stats.sps_lid[dd->ipath_unit] = arg;
 	dd->ipath_lid = arg;
 	dd->ipath_lmc = lmc;
 
@@ -315,7 +315,7 @@
 	return 0;
 }
 
-EXPORT_SYMBOL_GPL(ipath_set_sps_lid);
+EXPORT_SYMBOL_GPL(ipath_set_lid);
 
 int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
 {
@@ -340,18 +340,26 @@
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
 
-int ipath_layer_query_device(struct ipath_devdata *dd, u32 * vendor,
-			     u32 * boardrev, u32 * majrev, u32 * minrev)
+u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
 {
-	*vendor = dd->ipath_vendorid;
-	*boardrev = dd->ipath_boardrev;
-	*majrev = dd->ipath_majrev;
-	*minrev = dd->ipath_minrev;
-
-	return 0;
+	return dd->ipath_majrev;
 }
 
-EXPORT_SYMBOL_GPL(ipath_layer_query_device);
+EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
+
+u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
+{
+	return dd->ipath_minrev;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
+
+u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
+{
+	return dd->ipath_pcirev;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
 
 u32 ipath_layer_get_flags(struct ipath_devdata *dd)
 {
@@ -374,6 +382,13 @@
 
 EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
 
+u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
+{
+	return dd->ipath_vendorid;
+}
+
+EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
+
 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
 {
 	return dd->ipath_lastibcstat;
@@ -403,7 +418,7 @@
 	mutex_unlock(&ipath_layer_mutex);
 }
 
-void ipath_layer_del(struct ipath_devdata *dd)
+void ipath_layer_remove(struct ipath_devdata *dd)
 {
 	mutex_lock(&ipath_layer_mutex);
 
@@ -607,7 +622,7 @@
 		goto bail;
 	}
 
-	ret = ipath_setrcvhdrsize(dd, NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE);
+	ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
 
 	if (ret < 0)
 		goto bail;
@@ -616,9 +631,9 @@
 
 	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
 		intval |= IPATH_LAYER_INT_IF_UP;
-	if (ipath_stats.sps_lid[dd->ipath_unit])
+	if (dd->ipath_lid)
 		intval |= IPATH_LAYER_INT_LID;
-	if (ipath_stats.sps_mlid[dd->ipath_unit])
+	if (dd->ipath_mlid)
 		intval |= IPATH_LAYER_INT_BCAST;
 	/*
 	 * do this on open, in case low level is already up and
@@ -884,7 +899,7 @@
 /**
  * ipath_verbs_send - send a packet from the verbs layer
  * @dd: the infinipath device
- * @hdrwords: the number of works in the header
+ * @hdrwords: the number of words in the header
  * @hdr: the packet header
  * @len: the length of the packet in bytes
  * @ss: the SGE to send
@@ -1016,19 +1031,22 @@
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
 	cntrs->link_error_recovery_counter =
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+	/*
+	 * The link downed counter counts when the other side downs the
+	 * connection.  We add in the number of times we downed the link
+	 * due to local link integrity errors to compensate.
+	 */
 	cntrs->link_downed_counter =
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
 	cntrs->port_rcv_errors =
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errrcvflowctrlcnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
-		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlinkcnt) +
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
 	cntrs->port_rcv_remphys_errors =
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
@@ -1042,6 +1060,8 @@
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
 	cntrs->port_rcv_packets =
 		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
 
 	ret = 0;
 
@@ -1086,10 +1106,10 @@
 		}
 
 	vlsllnh = *((__be16 *) hdr);
-	if (vlsllnh != htons(IPS_LRH_BTH)) {
+	if (vlsllnh != htons(IPATH_LRH_BTH)) {
 		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
 			  "not sending\n", be16_to_cpu(vlsllnh),
-			  IPS_LRH_BTH);
+			  IPATH_LRH_BTH);
 		ret = -EINVAL;
 	}
 	if (ret)
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
index 6fefd15..7148509 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -54,6 +55,8 @@
 	u64 port_rcv_data;
 	u64 port_xmit_packets;
 	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
 };
 
 /*
@@ -126,7 +129,7 @@
 u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
 int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
 int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_sps_lid(struct ipath_devdata *, u32, u8);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
 int ipath_layer_send_hdr(struct ipath_devdata *dd,
 			 struct ether_header *hdr);
 int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
@@ -143,11 +146,13 @@
 int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
 __be64 ipath_layer_get_guid(struct ipath_devdata *);
 u32 ipath_layer_get_nguid(struct ipath_devdata *);
-int ipath_layer_query_device(struct ipath_devdata *, u32 * vendor,
-			     u32 * boardrev, u32 * majrev, u32 * minrev);
+u32 ipath_layer_get_majrev(struct ipath_devdata *);
+u32 ipath_layer_get_minrev(struct ipath_devdata *);
+u32 ipath_layer_get_pcirev(struct ipath_devdata *);
 u32 ipath_layer_get_flags(struct ipath_devdata *dd);
 struct device *ipath_layer_get_device(struct ipath_devdata *dd);
 u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
+u32 ipath_layer_get_vendorid(struct ipath_devdata *);
 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
 u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
 int ipath_layer_enable_timer(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 1a9d0a2..d340234 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
 #define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
@@ -84,7 +85,7 @@
 {
 	struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
 	struct ipath_devdata *dd = to_idev(ibdev)->dd;
-	u32 vendor, boardid, majrev, minrev;
+	u32 vendor, majrev, minrev;
 
 	if (smp->attr_mod)
 		smp->status |= IB_SMP_INVALID_FIELD;
@@ -104,9 +105,11 @@
 	nip->port_guid = nip->sys_guid;
 	nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
 	nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
-	ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
+	majrev = ipath_layer_get_majrev(dd);
+	minrev = ipath_layer_get_minrev(dd);
 	nip->revision = cpu_to_be32((majrev << 16) | minrev);
 	nip->local_port_num = port;
+	vendor = ipath_layer_get_vendorid(dd);
 	nip->vendor_id[0] = 0;
 	nip->vendor_id[1] = vendor >> 8;
 	nip->vendor_id[2] = vendor;
@@ -215,7 +218,7 @@
 	/* P_KeyViolations are counted by hardware. */
 	pip->pkey_violations =
 		cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
-			     dev->n_pkey_violations) & 0xFFFF);
+			     dev->z_pkey_violations) & 0xFFFF);
 	pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
 	/* Only the hardware GUID is supported for now */
 	pip->guid_cap = 1;
@@ -303,9 +306,9 @@
 	lid = be16_to_cpu(pip->lid);
 	if (lid != ipath_layer_get_lid(dev->dd)) {
 		/* Must be a valid unicast LID address. */
-		if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
+		if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
 			goto err;
-		ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
+		ipath_set_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
 		event.event = IB_EVENT_LID_CHANGE;
 		ib_dispatch_event(&event);
 	}
@@ -313,7 +316,7 @@
 	smlid = be16_to_cpu(pip->sm_lid);
 	if (smlid != dev->sm_lid) {
 		/* Must be a valid unicast LID address. */
-		if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
+		if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
 			goto err;
 		dev->sm_lid = smlid;
 		event.event = IB_EVENT_SM_CHANGE;
@@ -389,7 +392,7 @@
 	 * later.
 	 */
 	if (pip->pkey_violations == 0)
-		dev->n_pkey_violations =
+		dev->z_pkey_violations =
 			ipath_layer_get_cr_errpkey(dev->dd);
 
 	if (pip->qkey_violations == 0)
@@ -610,6 +613,9 @@
 #define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
 #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
 #define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
 #define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
 #define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
 #define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
@@ -844,18 +850,22 @@
 	ipath_layer_get_counters(dev->dd, &cntrs);
 
 	/* Adjust counters for any resets done. */
-	cntrs.symbol_error_counter -= dev->n_symbol_error_counter;
+	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
 	cntrs.link_error_recovery_counter -=
-		dev->n_link_error_recovery_counter;
-	cntrs.link_downed_counter -= dev->n_link_downed_counter;
+		dev->z_link_error_recovery_counter;
+	cntrs.link_downed_counter -= dev->z_link_downed_counter;
 	cntrs.port_rcv_errors += dev->rcv_errors;
-	cntrs.port_rcv_errors -= dev->n_port_rcv_errors;
-	cntrs.port_rcv_remphys_errors -= dev->n_port_rcv_remphys_errors;
-	cntrs.port_xmit_discards -= dev->n_port_xmit_discards;
-	cntrs.port_xmit_data -= dev->n_port_xmit_data;
-	cntrs.port_rcv_data -= dev->n_port_rcv_data;
-	cntrs.port_xmit_packets -= dev->n_port_xmit_packets;
-	cntrs.port_rcv_packets -= dev->n_port_rcv_packets;
+	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
+	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
+	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
+	cntrs.port_xmit_data -= dev->z_port_xmit_data;
+	cntrs.port_rcv_data -= dev->z_port_rcv_data;
+	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
+	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
+	cntrs.local_link_integrity_errors -=
+		dev->z_local_link_integrity_errors;
+	cntrs.excessive_buffer_overrun_errors -=
+		dev->z_excessive_buffer_overrun_errors;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -893,6 +903,16 @@
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
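+	/*
+	 * LocalLinkIntegrityErrors and ExcessiveBufferOverrunErrors are
+	 * 4-bit saturating counters, packed together into a single byte.
+	 */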
+	if (cntrs.local_link_integrity_errors > 0xFUL)
+		cntrs.local_link_integrity_errors = 0xFUL;
+	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+		cntrs.excessive_buffer_overrun_errors = 0xFUL;
+	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+		cntrs.excessive_buffer_overrun_errors;
+	if (dev->n_vl15_dropped > 0xFFFFUL)
+		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+	else
+		p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
 		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
 	else
@@ -928,10 +948,10 @@
 				      &rpkts, &xwait);
 
 	/* Adjust counters for any resets done. */
-	swords -= dev->n_port_xmit_data;
-	rwords -= dev->n_port_rcv_data;
-	spkts -= dev->n_port_xmit_packets;
-	rpkts -= dev->n_port_rcv_packets;
+	swords -= dev->z_port_xmit_data;
+	rwords -= dev->z_port_rcv_data;
+	spkts -= dev->z_port_xmit_packets;
+	rpkts -= dev->z_port_rcv_packets;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -967,37 +987,48 @@
 	ipath_layer_get_counters(dev->dd, &cntrs);
 
 	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
-		dev->n_symbol_error_counter = cntrs.symbol_error_counter;
+		dev->z_symbol_error_counter = cntrs.symbol_error_counter;
 
 	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
-		dev->n_link_error_recovery_counter =
+		dev->z_link_error_recovery_counter =
 			cntrs.link_error_recovery_counter;
 
 	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
-		dev->n_link_downed_counter = cntrs.link_downed_counter;
+		dev->z_link_downed_counter = cntrs.link_downed_counter;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
-		dev->n_port_rcv_errors =
+		dev->z_port_rcv_errors =
 			cntrs.port_rcv_errors + dev->rcv_errors;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
-		dev->n_port_rcv_remphys_errors =
+		dev->z_port_rcv_remphys_errors =
 			cntrs.port_rcv_remphys_errors;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
-		dev->n_port_xmit_discards = cntrs.port_xmit_discards;
+		dev->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+		dev->z_local_link_integrity_errors =
+			cntrs.local_link_integrity_errors;
+
+	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+		dev->z_excessive_buffer_overrun_errors =
+			cntrs.excessive_buffer_overrun_errors;
+
+	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
+		dev->n_vl15_dropped = 0;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
-		dev->n_port_xmit_data = cntrs.port_xmit_data;
+		dev->z_port_xmit_data = cntrs.port_xmit_data;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
-		dev->n_port_rcv_data = cntrs.port_rcv_data;
+		dev->z_port_rcv_data = cntrs.port_rcv_data;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
-		dev->n_port_xmit_packets = cntrs.port_xmit_packets;
+		dev->z_port_xmit_packets = cntrs.port_xmit_packets;
 
 	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
-		dev->n_port_rcv_packets = cntrs.port_rcv_packets;
+		dev->z_port_rcv_packets = cntrs.port_rcv_packets;
 
 	return recv_pma_get_portcounters(pmp, ibdev, port);
 }
@@ -1014,16 +1045,16 @@
 				      &rpkts, &xwait);
 
 	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
-		dev->n_port_xmit_data = swords;
+		dev->z_port_xmit_data = swords;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
-		dev->n_port_rcv_data = rwords;
+		dev->z_port_rcv_data = rwords;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
-		dev->n_port_xmit_packets = spkts;
+		dev->z_port_xmit_packets = spkts;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
-		dev->n_port_rcv_packets = rpkts;
+		dev->z_port_rcv_packets = rpkts;
 
 	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
 		dev->n_unicast_xmit = 0;
@@ -1272,32 +1303,8 @@
 		      struct ib_wc *in_wc, struct ib_grh *in_grh,
 		      struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	struct ipath_ibdev *dev = to_idev(ibdev);
 	int ret;
 
-	/*
-	 * Snapshot current HW counters to "clear" them.
-	 * This should be done when the driver is loaded except that for
-	 * some reason we get a zillion errors when brining up the link.
-	 */
-	if (dev->rcv_errors == 0) {
-		struct ipath_layer_counters cntrs;
-
-		ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
-		dev->rcv_errors++;
-		dev->n_symbol_error_counter = cntrs.symbol_error_counter;
-		dev->n_link_error_recovery_counter =
-			cntrs.link_error_recovery_counter;
-		dev->n_link_downed_counter = cntrs.link_downed_counter;
-		dev->n_port_rcv_errors = cntrs.port_rcv_errors + 1;
-		dev->n_port_rcv_remphys_errors =
-			cntrs.port_rcv_remphys_errors;
-		dev->n_port_xmit_discards = cntrs.port_xmit_discards;
-		dev->n_port_xmit_data = cntrs.port_xmit_data;
-		dev->n_port_rcv_data = cntrs.port_rcv_data;
-		dev->n_port_xmit_packets = cntrs.port_xmit_packets;
-		dev->n_port_rcv_packets = cntrs.port_rcv_packets;
-	}
 	switch (in_mad->mad_hdr.mgmt_class) {
 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
 	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 69ffec6..4ac31a5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -169,6 +170,11 @@
 	int n, m, i;
 	struct ib_mr *ret;
 
+	if (region->length == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
 	n = 0;
 	list_for_each_entry(chunk, &region->chunk_list, list)
 		n += chunk->nents;
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index 02e8c75..b83f66d 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -44,7 +45,7 @@
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the PathScale PE800, the PCI-Express chip.
+ * access functions for the QLogic InfiniPath PE800, the PCI-Express chip.
  *
  * This lists the InfiniPath PE800 registers, in the actual chip layout.
  * This structure should never be directly accessed.
@@ -532,7 +533,7 @@
 	if (n)
 		snprintf(name, namelen, "%s", n);
 
-	if (dd->ipath_majrev != 4 || dd->ipath_minrev != 1) {
+	if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
 		ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
 			      dd->ipath_majrev, dd->ipath_minrev);
 		ret = 1;
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 9f8855d9..83e557b 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
@@ -332,10 +333,11 @@
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
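+	/* clear the busy bit so the send tasklet can run after the reset */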
+	clear_bit(IPATH_S_BUSY, &qp->s_flags);
 	qp->s_hdrwords = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
-	atomic_set(&qp->msn, 0);
+	qp->r_msn = 0;
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
@@ -344,7 +346,8 @@
 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->s_nak_state = 0;
+	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+	qp->r_nak_state = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -362,10 +365,10 @@
  * @qp: the QP to put into an error state
  *
  * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
+ * QP s_lock should be held and interrupts disabled.
  */
 
-static void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
@@ -408,12 +411,14 @@
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
 
 	wc.opcode = IB_WC_RECV;
+	spin_lock(&qp->r_rq.lock);
 	while (qp->r_rq.tail != qp->r_rq.head) {
 		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
 		if (++qp->r_rq.tail >= qp->r_rq.size)
 			qp->r_rq.tail = 0;
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
+	spin_unlock(&qp->r_rq.lock);
 }
 
 /**
@@ -433,8 +438,7 @@
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&qp->r_rq.lock, flags);
-	spin_lock(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -446,7 +450,7 @@
 
 	if (attr_mask & IB_QP_AV)
 		if (attr->ah_attr.dlid == 0 ||
-		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
+		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
 			goto inval;
 
 	if (attr_mask & IB_QP_PKEY_INDEX)
@@ -505,34 +509,19 @@
 	}
 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
-		qp->s_min_rnr_timer = attr->min_rnr_timer;
+		qp->r_min_rnr_timer = attr->min_rnr_timer;
 
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
-	if (attr_mask & IB_QP_PKEY_INDEX)
-		qp->s_pkey_index = attr->pkey_index;
-
 	qp->state = new_state;
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-	/*
-	 * If QP1 changed to the RTS state, try to move to the link to INIT
-	 * even if it was ACTIVE so the SM will reinitialize the SMA's
-	 * state.
-	 */
-	if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
-		struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-	}
 	ret = 0;
 	goto bail;
 
 inval:
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 	ret = -EINVAL;
 
 bail:
@@ -566,7 +555,7 @@
 	attr->sq_draining = 0;
 	attr->max_rd_atomic = 1;
 	attr->max_dest_rd_atomic = 1;
-	attr->min_rnr_timer = qp->s_min_rnr_timer;
+	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
 	attr->timeout = 0;
 	attr->retry_cnt = qp->s_retry_cnt;
@@ -593,21 +582,17 @@
  * @qp: the queue pair to compute the AETH for
  *
  * Returns the AETH.
- *
- * The QP s_lock should be held.
  */
 __be32 ipath_compute_aeth(struct ipath_qp *qp)
 {
-	u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+	u32 aeth = qp->r_msn & IPATH_MSN_MASK;
 
-	if (qp->s_nak_state) {
-		aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
-	} else if (qp->ibqp.srq) {
+	if (qp->ibqp.srq) {
 		/*
 		 * Shared receive queues don't generate credits.
 		 * Set the credit field to the invalid value.
 		 */
-		aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
 	} else {
 		u32 min, max, x;
 		u32 credits;
@@ -637,7 +622,7 @@
 			else
 				min = x;
 		}
-		aeth |= x << IPS_AETH_CREDIT_SHIFT;
+		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
 	}
 	return cpu_to_be32(aeth);
 }
@@ -663,12 +648,22 @@
 	size_t sz;
 	struct ib_qp *ret;
 
-	if (init_attr->cap.max_send_sge > 255 ||
-	    init_attr->cap.max_recv_sge > 255) {
+	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
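+	/* Reject a QP with neither send nor receive resources. */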
+	if (init_attr->cap.max_send_sge +
+	    init_attr->cap.max_recv_sge +
+	    init_attr->cap.max_send_wr +
+	    init_attr->cap.max_recv_wr == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
@@ -686,18 +681,26 @@
 	case IB_QPT_GSI:
 		qp = kmalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
+			vfree(swq);
 			ret = ERR_PTR(-ENOMEM);
 			goto bail;
 		}
-		qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-		sz = sizeof(struct ipath_sge) *
-			init_attr->cap.max_recv_sge +
-			sizeof(struct ipath_rwqe);
-		qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
-		if (!qp->r_rq.wq) {
-			kfree(qp);
-			ret = ERR_PTR(-ENOMEM);
-			goto bail;
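+		/*
+		 * A QP attached to an SRQ takes its receive work requests
+		 * from the SRQ, so no receive queue is allocated here.
+		 */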
+		if (init_attr->srq) {
+			qp->r_rq.size = 0;
+			qp->r_rq.max_sge = 0;
+			qp->r_rq.wq = NULL;
+		} else {
+			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+			sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+				sizeof(struct ipath_rwqe);
+			qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+			if (!qp->r_rq.wq) {
+				kfree(qp);
+				vfree(swq);
+				ret = ERR_PTR(-ENOMEM);
+				goto bail;
+			}
 		}
 
 		/*
@@ -708,9 +711,7 @@
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
-		tasklet_init(&qp->s_task,
-			     init_attr->qp_type == IB_QPT_RC ?
-			     ipath_do_rc_send : ipath_do_uc_send,
+		tasklet_init(&qp->s_task, ipath_do_ruc_send,
 			     (unsigned long)qp);
 		INIT_LIST_HEAD(&qp->piowait);
 		INIT_LIST_HEAD(&qp->timerwait);
@@ -718,7 +719,6 @@
 		qp->s_wq = swq;
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
-		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
 		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
 			1 << IPATH_S_SIGNAL_REQ_WR : 0;
 		dev = to_idev(ibpd->device);
@@ -888,18 +888,18 @@
  */
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 {
-	u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
 
 	/*
 	 * If the credit is invalid, we can send
 	 * as many packets as we like.  Otherwise, we have to
 	 * honor the credit field.
 	 */
-	if (credit == IPS_AETH_CREDIT_INVAL) {
+	if (credit == IPATH_AETH_CREDIT_INVAL)
 		qp->s_lsn = (u32) -1;
-	} else if (qp->s_lsn != (u32) -1) {
+	else if (qp->s_lsn != (u32) -1) {
 		/* Compute new LSN (i.e., MSN + credit) */
-		credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
 		if (ipath_cmp24(credit, qp->s_lsn) > 0)
 			qp->s_lsn = credit;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 493b182..774d161 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,7 +32,7 @@
  */
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_RC_##x
@@ -41,14 +42,14 @@
  * @qp: the QP who's SGE we're restarting
  * @wqe: the work queue to initialize the QP's SGE from
  *
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
 {
 	struct ipath_ibdev *dev;
 	u32 len;
 
-	len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
+	len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) *
 		ib_mtu_enum_to_int(qp->path_mtu);
 	qp->s_sge.sge = wqe->sg_list[0];
 	qp->s_sge.sg_list = wqe->sg_list + 1;
@@ -72,11 +73,10 @@
  * Return bth0 if constructed; otherwise, return 0.
  * Note the QP s_lock must be held.
  */
-static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
-				    struct ipath_other_headers *ohdr,
-				    u32 pmtu)
+u32 ipath_make_rc_ack(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu)
 {
-	struct ipath_sge_state *ss;
 	u32 hwords;
 	u32 len;
 	u32 bth0;
@@ -90,13 +90,12 @@
 	 */
 	switch (qp->s_ack_state) {
 	case OP(RDMA_READ_REQUEST):
-		ss = &qp->s_rdma_sge;
+		qp->s_cur_sge = &qp->s_rdma_sge;
 		len = qp->s_rdma_len;
 		if (len > pmtu) {
 			len = pmtu;
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-		}
-		else
+		} else
 			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
 		qp->s_rdma_len -= len;
 		bth0 = qp->s_ack_state << 24;
@@ -108,7 +107,7 @@
 		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
 		/* FALLTHROUGH */
 	case OP(RDMA_READ_RESPONSE_MIDDLE):
-		ss = &qp->s_rdma_sge;
+		qp->s_cur_sge = &qp->s_rdma_sge;
 		len = qp->s_rdma_len;
 		if (len > pmtu)
 			len = pmtu;
@@ -127,41 +126,50 @@
 		 * We have to prevent new requests from changing
 		 * the r_sge state while an ipath_verbs_send()
 		 * is in progress.
-		 * Changing r_state allows the receiver
-		 * to continue processing new packets.
-		 * We do it here now instead of above so
-		 * that we are sure the packet was sent before
-		 * changing the state.
 		 */
-		qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
 		qp->s_ack_state = OP(ACKNOWLEDGE);
-		return 0;
+		bth0 = 0;
+		goto bail;
 
 	case OP(COMPARE_SWAP):
 	case OP(FETCH_ADD):
-		ss = NULL;
+		qp->s_cur_sge = NULL;
 		len = 0;
-		qp->r_state = OP(SEND_LAST);
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
+		/*
+		 * Set the s_ack_state so the receive interrupt handler
+		 * won't try to send an ACK (out of order) until this one
+		 * is actually sent.
+		 */
+		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+		bth0 = OP(ATOMIC_ACKNOWLEDGE) << 24;
 		ohdr->u.at.aeth = ipath_compute_aeth(qp);
-		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
+		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
 		hwords += sizeof(ohdr->u.at) / 4;
 		break;
 
 	default:
 		/* Send a regular ACK. */
-		ss = NULL;
+		qp->s_cur_sge = NULL;
 		len = 0;
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		bth0 = qp->s_ack_state << 24;
-		ohdr->u.aeth = ipath_compute_aeth(qp);
+		/*
+		 * Set the s_ack_state so the receive interrupt handler
+		 * won't try to send an ACK (out of order) until this one
+		 * is actually sent.
+		 */
+		qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+		bth0 = OP(ACKNOWLEDGE) << 24;
+		if (qp->s_nak_state)
+			ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+						    (qp->s_nak_state <<
+						     IPATH_AETH_CREDIT_SHIFT));
+		else
+			ohdr->u.aeth = ipath_compute_aeth(qp);
 		hwords++;
 	}
 	qp->s_hdrwords = hwords;
-	qp->s_cur_sge = ss;
 	qp->s_cur_size = len;
 
+bail:
 	return bth0;
 }
 
@@ -174,11 +182,11 @@
  * @bth2p: pointer to the BTH PSN word
  *
  * Return 1 if constructed; otherwise, return 0.
- * Note the QP s_lock must be held.
+ * Note the QP s_lock must be held and interrupts disabled.
  */
-static inline int ipath_make_rc_req(struct ipath_qp *qp,
-				    struct ipath_other_headers *ohdr,
-				    u32 pmtu, u32 *bth0p, u32 *bth2p)
+int ipath_make_rc_req(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_sge_state *ss;
@@ -257,7 +265,7 @@
 			break;
 
 		case IB_WR_RDMA_WRITE:
-			if (newreq)
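+			/*
+			 * Only advance the limit sequence number when flow
+			 * control credits are in use (s_lsn != (u32) -1).
+			 */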
+			if (newreq && qp->s_lsn != (u32) -1)
 				qp->s_lsn++;
 			/* FALLTHROUGH */
 		case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -283,8 +291,7 @@
 			else {
 				qp->s_state =
 					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-				/* Immediate data comes
-				 * after RETH */
+				/* Immediate data comes after RETH */
 				ohdr->u.rc.imm_data = wqe->wr.imm_data;
 				hwords += 1;
 				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -304,7 +311,8 @@
 			qp->s_state = OP(RDMA_READ_REQUEST);
 			hwords += sizeof(ohdr->u.rc.reth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				/*
 				 * Adjust s_next_psn to count the
 				 * expected number of responses.
@@ -335,7 +343,8 @@
 				wqe->wr.wr.atomic.compare_add);
 			hwords += sizeof(struct ib_atomic_eth) / 4;
 			if (newreq) {
-				qp->s_lsn++;
+				if (qp->s_lsn != (u32) -1)
+					qp->s_lsn++;
 				wqe->lpsn = wqe->psn;
 			}
 			if (++qp->s_cur == qp->s_size)
@@ -352,9 +361,14 @@
 			if (qp->s_tail >= qp->s_size)
 				qp->s_tail = 0;
 		}
-		bth2 |= qp->s_psn++ & IPS_PSN_MASK;
+		bth2 |= qp->s_psn++ & IPATH_PSN_MASK;
 		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
 			qp->s_next_psn = qp->s_psn;
+		/*
+		 * Put the QP on the pending list so lost ACKs will cause
+		 * a retry.  More than one request can be pending so the
+		 * QP may already be on the dev->pending list.
+		 */
 		spin_lock(&dev->pending_lock);
 		if (list_empty(&qp->timerwait))
 			list_add_tail(&qp->timerwait,
@@ -364,8 +378,8 @@
 
 	case OP(RDMA_READ_RESPONSE_FIRST):
 		/*
-		 * This case can only happen if a send is restarted.  See
-		 * ipath_restart_rc().
+		 * This case can only happen if a send is restarted.
+		 * See ipath_restart_rc().
 		 */
 		ipath_init_restart(qp, wqe);
 		/* FALLTHROUGH */
@@ -373,7 +387,7 @@
 		qp->s_state = OP(SEND_MIDDLE);
 		/* FALLTHROUGH */
 	case OP(SEND_MIDDLE):
-		bth2 = qp->s_psn++ & IPS_PSN_MASK;
+		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
 		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
 			qp->s_next_psn = qp->s_psn;
 		ss = &qp->s_sge;
@@ -415,7 +429,7 @@
 		qp->s_state = OP(RDMA_WRITE_MIDDLE);
 		/* FALLTHROUGH */
 	case OP(RDMA_WRITE_MIDDLE):
-		bth2 = qp->s_psn++ & IPS_PSN_MASK;
+		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
 		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
 			qp->s_next_psn = qp->s_psn;
 		ss = &qp->s_sge;
@@ -452,7 +466,7 @@
 		 * See ipath_restart_rc().
 		 */
 		ipath_init_restart(qp, wqe);
-		len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
+		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
 		ohdr->u.rc.reth.vaddr =
 			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
 		ohdr->u.rc.reth.rkey =
@@ -460,7 +474,7 @@
 		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
 		qp->s_state = OP(RDMA_READ_REQUEST);
 		hwords += sizeof(ohdr->u.rc.reth) / 4;
-		bth2 = qp->s_psn++ & IPS_PSN_MASK;
+		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
 		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
 			qp->s_next_psn = qp->s_psn;
 		ss = NULL;
@@ -496,204 +510,183 @@
 	return 0;
 }
 
-static inline void ipath_make_rc_grh(struct ipath_qp *qp,
-				     struct ib_global_route *grh,
-				     u32 nwords)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-	/* GRH header size in 32-bit words. */
-	qp->s_hdrwords += 10;
-	qp->s_hdr.u.l.grh.version_tclass_flow =
-		cpu_to_be32((6 << 28) |
-			    (grh->traffic_class << 20) |
-			    grh->flow_label);
-	qp->s_hdr.u.l.grh.paylen =
-		cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
-			     SIZE_OF_CRC) << 2);
-	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-	qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-	qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
-	/* The SGID is 32-bit aligned. */
-	qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
-	qp->s_hdr.u.l.grh.sgid.global.interface_id =
-		ipath_layer_get_guid(dev->dd);
-	qp->s_hdr.u.l.grh.dgid = grh->dgid;
-}
-
 /**
- * ipath_do_rc_send - perform a send on an RC QP
- * @data: contains a pointer to the QP
+ * send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
  *
- * Process entries in the send work queue until credit or queue is
- * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP s_lock, two threads could send
- * packets out of order.
+ * This is called from ipath_rc_rcv() and only uses the receive
+ * side QP state.
+ * Note that RDMA reads are handled in the send side QP state and tasklet.
  */
-void ipath_do_rc_send(unsigned long data)
-{
-	struct ipath_qp *qp = (struct ipath_qp *)data;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned long flags;
-	u16 lrh0;
-	u32 nwords;
-	u32 extra_bytes;
-	u32 bth0;
-	u32 bth2;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-	struct ipath_other_headers *ohdr;
-
-	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-		goto bail;
-
-	if (unlikely(qp->remote_ah_attr.dlid ==
-		     ipath_layer_get_lid(dev->dd))) {
-		struct ib_wc wc;
-
-		/*
-		 * Pass in an uninitialized ib_wc to be consistent with
-		 * other places where ipath_ruc_loopback() is called.
-		 */
-		ipath_ruc_loopback(qp, &wc);
-		goto clear;
-	}
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-	/* Check for a constructed packet to be sent. */
-	if (qp->s_hdrwords != 0) {
-		/*
-		 * If no PIO bufs are available, return.  An interrupt will
-		 * call ipath_ib_piobufavail() when one is available.
-		 */
-		_VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
-		_VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
-			    qp->s_cur_sge->sg_list,
-			    qp->s_cur_sge->num_sge,
-			    qp->s_cur_sge->sge.vaddr,
-			    qp->s_cur_sge->sge.sge_length,
-			    qp->s_cur_sge->sge.length,
-			    qp->s_cur_sge->sge.m,
-			    qp->s_cur_sge->sge.n);
-		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-				     (u32 *) &qp->s_hdr, qp->s_cur_size,
-				     qp->s_cur_sge)) {
-			ipath_no_bufs_available(qp, dev);
-			goto bail;
-		}
-		dev->n_unicast_xmit++;
-		/* Record that we sent the packet and s_hdr is empty. */
-		qp->s_hdrwords = 0;
-	}
-
-	/*
-	 * The lock is needed to synchronize between setting
-	 * qp->s_ack_state, resend timer, and post_send().
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-
-	/* Sending responses has higher priority over sending requests. */
-	if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
-		bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
-	else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
-		goto done;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	/* Construct the header. */
-	extra_bytes = (4 - qp->s_cur_size) & 3;
-	nwords = (qp->s_cur_size + extra_bytes) >> 2;
-	lrh0 = IPS_LRH_BTH;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
-		lrh0 = IPS_LRH_GRH;
-	}
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-				       SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
-	bth0 |= extra_bytes << 20;
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(bth2);
-
-	/* Check for more work to do. */
-	goto again;
-
-done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-clear:
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
-bail:
-	return;
-}
-
 static void send_rc_ack(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	u16 lrh0;
 	u32 bth0;
+	u32 hwords;
+	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
 
 	/* Construct the header. */
-	ohdr = &qp->s_hdr.u.oth;
-	lrh0 = IPS_LRH_BTH;
+	ohdr = &hdr.u.oth;
+	lrh0 = IPATH_LRH_BTH;
 	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-	qp->s_hdrwords = 6;
+	hwords = 6;
 	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
-		ohdr = &qp->s_hdr.u.l.oth;
-		lrh0 = IPS_LRH_GRH;
+		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
+					 &qp->remote_ah_attr.grh,
+					 hwords, 0);
+		ohdr = &hdr.u.l.oth;
+		lrh0 = IPATH_LRH_GRH;
 	}
+	/* read pkey_index w/o lock (it's atomic) */
 	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
-	ohdr->u.aeth = ipath_compute_aeth(qp);
-	if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
-		bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
-		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
-		qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
-	}
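+	/*
+	 * A NAK carries the MSN plus the NAK code in the AETH credit
+	 * field; otherwise send the normal AETH.
+	 */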
+	if (qp->r_nak_state)
+		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
+					    (qp->r_nak_state <<
+					     IPATH_AETH_CREDIT_SHIFT));
 	else
+		ohdr->u.aeth = ipath_compute_aeth(qp);
+	if (qp->r_ack_state >= OP(COMPARE_SWAP)) {
+		bth0 |= OP(ATOMIC_ACKNOWLEDGE) << 24;
+		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->r_atomic_data);
+		hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
+	} else
 		bth0 |= OP(ACKNOWLEDGE) << 24;
 	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+	hdr.lrh[0] = cpu_to_be16(lrh0);
+	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+	hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
+	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
 	/*
 	 * If we can send the ACK, clear the ACK state.
 	 */
-	if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
-			     0, NULL) == 0) {
-		qp->s_ack_state = OP(ACKNOWLEDGE);
-		dev->n_rc_qacks++;
+	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
+		qp->r_ack_state = OP(ACKNOWLEDGE);
 		dev->n_unicast_xmit++;
+	} else {
+		/*
+		 * We are out of PIO buffers at the moment.
+		 * Pass responsibility for sending the ACK to the
+		 * send tasklet so that when a PIO buffer becomes
+		 * available, the ACK is sent ahead of other outgoing
+		 * packets.
+		 */
+		dev->n_rc_qacks++;
+		spin_lock_irq(&qp->s_lock);
+		/* Don't coalesce if an RDMA read or atomic is pending. */
+		if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
+		    qp->s_ack_state < OP(RDMA_READ_REQUEST)) {
+			qp->s_ack_state = qp->r_ack_state;
+			qp->s_nak_state = qp->r_nak_state;
+			qp->s_ack_psn = qp->r_ack_psn;
+			qp->r_ack_state = OP(ACKNOWLEDGE);
+		}
+		spin_unlock_irq(&qp->s_lock);
+
+		/* Call ipath_do_rc_send() in another thread. */
+		tasklet_hi_schedule(&qp->s_task);
 	}
 }
 
 /**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct ipath_qp *qp, u32 psn)
+{
+	u32 n = qp->s_last;
+	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
+	u32 opcode;
+
+	qp->s_cur = n;
+
+	/*
+	 * If we are starting the request from the beginning,
+	 * let the normal send code handle initialization.
+	 */
+	if (ipath_cmp24(psn, wqe->psn) <= 0) {
+		qp->s_state = OP(SEND_LAST);
+		goto done;
+	}
+
+	/* Find the work request opcode corresponding to the given PSN. */
+	opcode = wqe->wr.opcode;
+	for (;;) {
+		int diff;
+
+		if (++n == qp->s_size)
+			n = 0;
+		if (n == qp->s_tail)
+			break;
+		wqe = get_swqe_ptr(qp, n);
+		diff = ipath_cmp24(psn, wqe->psn);
+		if (diff < 0)
+			break;
+		qp->s_cur = n;
+		/*
+		 * If we are starting the request from the beginning,
+		 * let the normal send code handle initialization.
+		 */
+		if (diff == 0) {
+			qp->s_state = OP(SEND_LAST);
+			goto done;
+		}
+		opcode = wqe->wr.opcode;
+	}
+
+	/*
+	 * Set the state to restart in the middle of a request.
+	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
+	 * See ipath_do_rc_send().
+	 */
+	switch (opcode) {
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+		break;
+
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+		break;
+
+	case IB_WR_RDMA_READ:
+		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+		break;
+
+	default:
+		/*
+		 * This case shouldn't happen since there is only
+		 * one PSN per request.
+		 */
+		qp->s_state = OP(SEND_LAST);
+	}
+done:
+	qp->s_psn = psn;
+}
+
+/**
  * ipath_restart_rc - back up requester to resend the last un-ACKed request
  * @qp: the QP to restart
  * @psn: packet sequence number for the request
  * @wc: the work completion request
  *
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
 {
 	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 	struct ipath_ibdev *dev;
-	u32 n;
 
 	/*
 	 * If there are no requests pending, we are done.
@@ -735,62 +728,7 @@
 	else
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-	/*
-	 * If we are starting the request from the beginning, let the normal
-	 * send code handle initialization.
-	 */
-	qp->s_cur = qp->s_last;
-	if (ipath_cmp24(psn, wqe->psn) <= 0) {
-		qp->s_state = OP(SEND_LAST);
-		qp->s_psn = wqe->psn;
-	} else {
-		n = qp->s_cur;
-		for (;;) {
-			if (++n == qp->s_size)
-				n = 0;
-			if (n == qp->s_tail) {
-				if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-					qp->s_cur = n;
-					wqe = get_swqe_ptr(qp, n);
-				}
-				break;
-			}
-			wqe = get_swqe_ptr(qp, n);
-			if (ipath_cmp24(psn, wqe->psn) < 0)
-				break;
-			qp->s_cur = n;
-		}
-		qp->s_psn = psn;
-
-		/*
-		 * Reset the state to restart in the middle of a request.
-		 * Don't change the s_sge, s_cur_sge, or s_cur_size.
-		 * See ipath_do_rc_send().
-		 */
-		switch (wqe->wr.opcode) {
-		case IB_WR_SEND:
-		case IB_WR_SEND_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-			break;
-
-		case IB_WR_RDMA_WRITE:
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-			break;
-
-		case IB_WR_RDMA_READ:
-			qp->s_state =
-				OP(RDMA_READ_RESPONSE_MIDDLE);
-			break;
-
-		default:
-			/*
-			 * This case shouldn't happen since its only
-			 * one PSN per req.
-			 */
-			qp->s_state = OP(SEND_LAST);
-		}
-	}
+	reset_psn(qp, psn);
 
 done:
 	tasklet_hi_schedule(&qp->s_task);
@@ -800,76 +738,14 @@
 }
 
 /**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-	struct ipath_swqe *wqe;
-	u32 n;
-
-	n = qp->s_cur;
-	wqe = get_swqe_ptr(qp, n);
-	for (;;) {
-		if (++n == qp->s_size)
-			n = 0;
-		if (n == qp->s_tail) {
-			if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
-				qp->s_cur = n;
-				wqe = get_swqe_ptr(qp, n);
-			}
-			break;
-		}
-		wqe = get_swqe_ptr(qp, n);
-		if (ipath_cmp24(psn, wqe->psn) < 0)
-			break;
-		qp->s_cur = n;
-	}
-	qp->s_psn = psn;
-
-	/*
-	 * Set the state to restart in the middle of a
-	 * request.  Don't change the s_sge, s_cur_sge, or
-	 * s_cur_size.  See ipath_do_rc_send().
-	 */
-	switch (wqe->wr.opcode) {
-	case IB_WR_SEND:
-	case IB_WR_SEND_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-		break;
-
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-		break;
-
-	case IB_WR_RDMA_READ:
-		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-		break;
-
-	default:
-		/*
-		 * This case shouldn't happen since its only
-		 * one PSN per req.
-		 */
-		qp->s_state = OP(SEND_LAST);
-	}
-}
-
-/**
  * do_rc_ack - process an incoming RC ACK
  * @qp: the QP the ACK came in on
  * @psn: the packet sequence number of the ACK
  * @opcode: the opcode of the request that resulted in the ACK
  *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
+ * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
  * for the given QP.
- * Called at interrupt level with the QP s_lock held.
+ * Called at interrupt level with the QP s_lock held and interrupts disabled.
  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
  */
 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
@@ -1006,26 +882,16 @@
 		if (qp->s_last == qp->s_tail)
 			goto bail;
 
-		/* The last valid PSN seen is the previous request's. */
-		qp->s_last_psn = wqe->psn - 1;
+		/* The last valid PSN is the previous PSN. */
+		qp->s_last_psn = psn - 1;
 
 		dev->n_rc_resends += (int)qp->s_psn - (int)psn;
 
-		/*
-		 * If we are starting the request from the beginning, let
-		 * the normal send code handle initialization.
-		 */
-		qp->s_cur = qp->s_last;
-		wqe = get_swqe_ptr(qp, qp->s_cur);
-		if (ipath_cmp24(psn, wqe->psn) <= 0) {
-			qp->s_state = OP(SEND_LAST);
-			qp->s_psn = wqe->psn;
-		} else
-			reset_psn(qp, psn);
+		reset_psn(qp, psn);
 
 		qp->s_rnr_timeout =
-			ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
-					   IPS_AETH_CREDIT_MASK];
+			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
+					   IPATH_AETH_CREDIT_MASK];
 		ipath_insert_rnr_queue(qp);
 		goto bail;
 
@@ -1033,8 +899,8 @@
 		/* The last valid PSN seen is the previous request's. */
 		if (qp->s_last != qp->s_tail)
 			qp->s_last_psn = wqe->psn - 1;
-		switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
-			IPS_AETH_CREDIT_MASK) {
+		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
+			IPATH_AETH_CREDIT_MASK) {
 		case 0:	/* PSN sequence error */
 			dev->n_seq_naks++;
 			/*
@@ -1182,32 +1048,33 @@
 			goto ack_done;
 		}
 	rdma_read:
-	if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
-		goto ack_done;
-	if (unlikely(tlen != (hdrsize + pmtu + 4)))
-		goto ack_done;
-	if (unlikely(pmtu >= qp->s_len))
-		goto ack_done;
-	/* We got a response so update the timeout. */
-	if (unlikely(qp->s_last == qp->s_tail ||
-		     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
-		     IB_WR_RDMA_READ))
-		goto ack_done;
-	spin_lock(&dev->pending_lock);
-	if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-		list_move_tail(&qp->timerwait,
-			       &dev->pending[dev->pending_index]);
-	spin_unlock(&dev->pending_lock);
-	/*
-	 * Update the RDMA receive state but do the copy w/o holding the
-	 * locks and blocking interrupts.  XXX Yet another place that
-	 * affects relaxed RDMA order since we don't want s_sge modified.
-	 */
-	qp->s_len -= pmtu;
-	qp->s_last_psn = psn;
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	ipath_copy_sge(&qp->s_sge, data, pmtu);
-	goto bail;
+		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+			goto ack_done;
+		if (unlikely(tlen != (hdrsize + pmtu + 4)))
+			goto ack_done;
+		if (unlikely(pmtu >= qp->s_len))
+			goto ack_done;
+		/* We got a response so update the timeout. */
+		if (unlikely(qp->s_last == qp->s_tail ||
+			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+			     IB_WR_RDMA_READ))
+			goto ack_done;
+		spin_lock(&dev->pending_lock);
+		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
+			list_move_tail(&qp->timerwait,
+				       &dev->pending[dev->pending_index]);
+		spin_unlock(&dev->pending_lock);
+		/*
+		 * Update the RDMA receive state but do the copy w/o
+		 * holding the locks and blocking interrupts.
+		 * XXX Yet another place that affects relaxed RDMA order
+		 * since we don't want s_sge modified.
+		 */
+		qp->s_len -= pmtu;
+		qp->s_last_psn = psn;
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		ipath_copy_sge(&qp->s_sge, data, pmtu);
+		goto bail;
 
 	case OP(RDMA_READ_RESPONSE_LAST):
 		/* ACKs READ req. */
@@ -1230,18 +1097,12 @@
 		 * ICRC (4).
 		 */
 		if (unlikely(tlen <= (hdrsize + pad + 8))) {
-			/*
-			 * XXX Need to generate an error CQ
-			 * entry.
-			 */
+			/* XXX Need to generate an error CQ entry. */
 			goto ack_done;
 		}
 		tlen -= hdrsize + pad + 8;
 		if (unlikely(tlen != qp->s_len)) {
-			/*
-			 * XXX Need to generate an error CQ
-			 * entry.
-			 */
+			/* XXX Need to generate an error CQ entry. */
 			goto ack_done;
 		}
 		if (!header_in_data)
@@ -1254,9 +1115,12 @@
 		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
 			/*
 			 * Change the state so we continue
-			 * processing new requests.
+			 * processing new requests and wake up the
+			 * tasklet if there are posted sends.
 			 */
 			qp->s_state = OP(SEND_LAST);
+			if (qp->s_tail != qp->s_head)
+				tasklet_hi_schedule(&qp->s_task);
 		}
 		goto ack_done;
 	}
@@ -1302,18 +1166,16 @@
 		 * Don't queue the NAK if a RDMA read, atomic, or
 		 * NAK is pending though.
 		 */
-		spin_lock(&qp->s_lock);
-		if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
-		     qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
-		    qp->s_nak_state != 0) {
-			spin_unlock(&qp->s_lock);
+		if (qp->s_ack_state != OP(ACKNOWLEDGE) ||
+		    qp->r_nak_state != 0)
 			goto done;
+		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+			qp->r_ack_state = OP(SEND_ONLY);
+			qp->r_nak_state = IB_NAK_PSN_ERROR;
+			/* Use the expected PSN. */
+			qp->r_ack_psn = qp->r_psn;
 		}
-		qp->s_ack_state = OP(SEND_ONLY);
-		qp->s_nak_state = IB_NAK_PSN_ERROR;
-		/* Use the expected PSN. */
-		qp->s_ack_psn = qp->r_psn;
-		goto resched;
+		goto send_ack;
 	}
 
 	/*
@@ -1327,27 +1189,7 @@
 	 * send the earliest so that RDMA reads can be restarted at
 	 * the requester's expected PSN.
 	 */
-	spin_lock(&qp->s_lock);
-	if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
-	    ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
-		if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
-			qp->s_ack_psn = psn;
-		spin_unlock(&qp->s_lock);
-		goto done;
-	}
-	switch (opcode) {
-	case OP(RDMA_READ_REQUEST):
-		/*
-		 * We have to be careful to not change s_rdma_sge
-		 * while ipath_do_rc_send() is using it and not
-		 * holding the s_lock.
-		 */
-		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
-			spin_unlock(&qp->s_lock);
-			dev->n_rdma_dup_busy++;
-			goto done;
-		}
+	if (opcode == OP(RDMA_READ_REQUEST)) {
 		/* RETH comes after BTH */
 		if (!header_in_data)
 			reth = &ohdr->u.rc.reth;
@@ -1355,6 +1197,22 @@
 			reth = (struct ib_reth *)data;
 			data += sizeof(*reth);
 		}
+		/*
+		 * If we receive a duplicate RDMA request, it means the
+		 * requester saw a sequence error and needs to restart
+		 * from an earlier point.  We can abort the current
+		 * RDMA read send in that case.
+		 */
+		spin_lock_irq(&qp->s_lock);
+		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
+		    (qp->s_hdrwords || ipath_cmp24(psn, qp->s_ack_psn) >= 0)) {
+			/*
+			 * We are already sending earlier requested data.
+			 * Don't abort it to send later out of sequence data.
+			 */
+			spin_unlock_irq(&qp->s_lock);
+			goto done;
+		}
 		qp->s_rdma_len = be32_to_cpu(reth->length);
 		if (qp->s_rdma_len != 0) {
 			u32 rkey = be32_to_cpu(reth->rkey);
@@ -1368,8 +1226,10 @@
 			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
 					   qp->s_rdma_len, vaddr, rkey,
 					   IB_ACCESS_REMOTE_READ);
-			if (unlikely(!ok))
+			if (unlikely(!ok)) {
+				spin_unlock_irq(&qp->s_lock);
 				goto done;
+			}
 		} else {
 			qp->s_rdma_sge.sg_list = NULL;
 			qp->s_rdma_sge.num_sge = 0;
@@ -1378,25 +1238,44 @@
 			qp->s_rdma_sge.sge.length = 0;
 			qp->s_rdma_sge.sge.sge_length = 0;
 		}
-		break;
+		qp->s_ack_state = opcode;
+		qp->s_ack_psn = psn;
+		spin_unlock_irq(&qp->s_lock);
+		tasklet_hi_schedule(&qp->s_task);
+		goto send_ack;
+	}
 
+	/*
+	 * A pending RDMA read will ACK anything before it so
+	 * ignore earlier duplicate requests.
+	 */
+	if (qp->s_ack_state != OP(ACKNOWLEDGE))
+		goto done;
+
+	/*
+	 * If an ACK is pending, don't replace the pending ACK
+	 * with an earlier one since the later one will ACK the earlier.
+	 * Also, if we already have a pending atomic, send it.
+	 */
+	if (qp->r_ack_state != OP(ACKNOWLEDGE) &&
+	    (ipath_cmp24(psn, qp->r_ack_psn) <= 0 ||
+	     qp->r_ack_state >= OP(COMPARE_SWAP)))
+		goto send_ack;
+	switch (opcode) {
 	case OP(COMPARE_SWAP):
 	case OP(FETCH_ADD):
 		/*
-		 * Check for the PSN of the last atomic operations
+		 * Check for the PSN of the last atomic operation
 		 * performed and resend the result if found.
 		 */
-		if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
-			spin_unlock(&qp->s_lock);
+		if ((psn & IPATH_PSN_MASK) != qp->r_atomic_psn)
 			goto done;
-		}
-		qp->s_ack_atomic = qp->r_atomic_data;
 		break;
 	}
-	qp->s_ack_state = opcode;
-	qp->s_nak_state = 0;
-	qp->s_ack_psn = psn;
-resched:
+	qp->r_ack_state = opcode;
+	qp->r_nak_state = 0;
+	qp->r_ack_psn = psn;
+send_ack:
 	return 0;
 
 done:
@@ -1424,7 +1303,6 @@
 	u32 hdrsize;
 	u32 psn;
 	u32 pad;
-	unsigned long flags;
 	struct ib_wc wc;
 	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	int diff;
@@ -1453,11 +1331,6 @@
 		} else
 			psn = be32_to_cpu(ohdr->bth[2]);
 	}
-	/*
-	 * The opcode is in the low byte when its in network order
-	 * (top byte when in host order).
-	 */
-	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
 
 	/*
 	 * Process responses (ACKs) before anything else.  Note that the
@@ -1465,22 +1338,21 @@
 	 * queue rather than the expected receive packet sequence number.
 	 * In other words, this QP is the requester.
 	 */
+	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
 	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
 				  hdrsize, pmtu, header_in_data);
-		goto bail;
+		goto done;
 	}
 
-	spin_lock_irqsave(&qp->r_rq.lock, flags);
-
 	/* Compute 24 bits worth of difference. */
 	diff = ipath_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
 				       psn, diff, header_in_data))
 			goto done;
-		goto resched;
+		goto send_ack;
 	}
 
 	/* Check for opcode sequence errors. */
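For reference on ipath_cmp24() as used above: PSNs are 24-bit circular
sequence numbers, so a plain integer compare misbehaves near the wrap
point.  A minimal sketch of such a comparison (illustrative; not
necessarily the driver's exact implementation):

	/*
	 * Compare two 24-bit PSNs modulo 2^24 by shifting the 24-bit
	 * difference into the sign bit and sign-extending: negative
	 * means a < b, zero means equal, positive means a > b (all
	 * in the circular sense).
	 */
	static int psn_cmp24(u32 a, u32 b)
	{
		return (int)((a - b) << 8) >> 8;
	}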
@@ -1492,22 +1364,19 @@
 		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
 			break;
 	nack_inv:
-	/*
-	 * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
-	 * NAK if a RDMA read, atomic, or NAK is pending though.
-	 */
-	spin_lock(&qp->s_lock);
-	if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
-	    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
-		spin_unlock(&qp->s_lock);
-		goto done;
-	}
-	/* XXX Flush WQEs */
-	qp->state = IB_QPS_ERR;
-	qp->s_ack_state = OP(SEND_ONLY);
-	qp->s_nak_state = IB_NAK_INVALID_REQUEST;
-	qp->s_ack_psn = qp->r_psn;
-	goto resched;
+		/*
+		 * A NAK will ACK earlier sends and RDMA writes.
+		 * Don't queue the NAK if a RDMA read, atomic, or NAK
+		 * is pending though.
+		 */
+		if (qp->r_ack_state >= OP(COMPARE_SWAP))
+			goto send_ack;
+		/* XXX Flush WQEs */
+		qp->state = IB_QPS_ERR;
+		qp->r_ack_state = OP(SEND_ONLY);
+		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+		qp->r_ack_psn = qp->r_psn;
+		goto send_ack;
 
 	case OP(RDMA_WRITE_FIRST):
 	case OP(RDMA_WRITE_MIDDLE):
@@ -1517,20 +1386,6 @@
 			break;
 		goto nack_inv;
 
-	case OP(RDMA_READ_REQUEST):
-	case OP(COMPARE_SWAP):
-	case OP(FETCH_ADD):
-		/*
-		 * Drop all new requests until a response has been sent.  A
-		 * new request then ACKs the RDMA response we sent.  Relaxed
-		 * ordering would allow new requests to be processed but we
-		 * would need to keep a queue of rwqe's for all that are in
-		 * progress.  Note that we can't RNR NAK this request since
-		 * the RDMA READ or atomic response is already queued to be
-		 * sent (unless we implement a response send queue).
-		 */
-		goto done;
-
 	default:
 		if (opcode == OP(SEND_MIDDLE) ||
 		    opcode == OP(SEND_LAST) ||
@@ -1539,6 +1394,11 @@
 		    opcode == OP(RDMA_WRITE_LAST) ||
 		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
 			goto nack_inv;
+		/*
+		 * Note that it is up to the requester not to send a new
+		 * RDMA read or atomic operation before receiving an ACK
+		 * for the previous operation.
+		 */
 		break;
 	}
 
@@ -1555,17 +1415,12 @@
 			 * Don't queue the NAK if a RDMA read or atomic
 			 * is pending though.
 			 */
-			spin_lock(&qp->s_lock);
-			if (qp->s_ack_state >=
-			    OP(RDMA_READ_REQUEST) &&
-			    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
-				spin_unlock(&qp->s_lock);
-				goto done;
-			}
-			qp->s_ack_state = OP(SEND_ONLY);
-			qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
-			qp->s_ack_psn = qp->r_psn;
-			goto resched;
+			if (qp->r_ack_state >= OP(COMPARE_SWAP))
+				goto send_ack;
+			qp->r_ack_state = OP(SEND_ONLY);
+			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+			qp->r_ack_psn = qp->r_psn;
+			goto send_ack;
 		}
 		qp->r_rcv_len = 0;
 		/* FALLTHROUGH */
@@ -1622,7 +1477,7 @@
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto nack_inv;
 		ipath_copy_sge(&qp->r_sge, data, tlen);
-		atomic_inc(&qp->msn);
+		qp->r_msn++;
 		if (opcode == OP(RDMA_WRITE_LAST) ||
 		    opcode == OP(RDMA_WRITE_ONLY))
 			break;
@@ -1666,29 +1521,8 @@
 			ok = ipath_rkey_ok(dev, &qp->r_sge,
 					   qp->r_len, vaddr, rkey,
 					   IB_ACCESS_REMOTE_WRITE);
-			if (unlikely(!ok)) {
-			nack_acc:
-				/*
-				 * A NAK will ACK earlier sends and RDMA
-				 * writes.  Don't queue the NAK if a RDMA
-				 * read, atomic, or NAK is pending though.
-				 */
-				spin_lock(&qp->s_lock);
-				if (qp->s_ack_state >=
-				    OP(RDMA_READ_REQUEST) &&
-				    qp->s_ack_state !=
-				    IB_OPCODE_ACKNOWLEDGE) {
-					spin_unlock(&qp->s_lock);
-					goto done;
-				}
-				/* XXX Flush WQEs */
-				qp->state = IB_QPS_ERR;
-				qp->s_ack_state = OP(RDMA_WRITE_ONLY);
-				qp->s_nak_state =
-					IB_NAK_REMOTE_ACCESS_ERROR;
-				qp->s_ack_psn = qp->r_psn;
-				goto resched;
-			}
+			if (unlikely(!ok))
+				goto nack_acc;
 		} else {
 			qp->r_sge.sg_list = NULL;
 			qp->r_sge.sge.mr = NULL;
@@ -1715,12 +1549,10 @@
 			reth = (struct ib_reth *)data;
 			data += sizeof(*reth);
 		}
-		spin_lock(&qp->s_lock);
-		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
-		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
-			spin_unlock(&qp->s_lock);
-			goto done;
-		}
+		if (unlikely(!(qp->qp_access_flags &
+			       IB_ACCESS_REMOTE_READ)))
+			goto nack_acc;
+		spin_lock_irq(&qp->s_lock);
 		qp->s_rdma_len = be32_to_cpu(reth->length);
 		if (qp->s_rdma_len != 0) {
 			u32 rkey = be32_to_cpu(reth->rkey);
@@ -1732,7 +1564,7 @@
 					   qp->s_rdma_len, vaddr, rkey,
 					   IB_ACCESS_REMOTE_READ);
 			if (unlikely(!ok)) {
-				spin_unlock(&qp->s_lock);
+				spin_unlock_irq(&qp->s_lock);
 				goto nack_acc;
 			}
 			/*
@@ -1749,21 +1581,25 @@
 			qp->s_rdma_sge.sge.length = 0;
 			qp->s_rdma_sge.sge.sge_length = 0;
 		}
-		if (unlikely(!(qp->qp_access_flags &
-			       IB_ACCESS_REMOTE_READ)))
-			goto nack_acc;
 		/*
 		 * We need to increment the MSN here instead of when we
 		 * finish sending the result since a duplicate request would
 		 * increment it more than once.
 		 */
-		atomic_inc(&qp->msn);
+		qp->r_msn++;
+
 		qp->s_ack_state = opcode;
-		qp->s_nak_state = 0;
 		qp->s_ack_psn = psn;
+		spin_unlock_irq(&qp->s_lock);
+
 		qp->r_psn++;
 		qp->r_state = opcode;
-		goto rdmadone;
+		qp->r_nak_state = 0;
+
+		/* Call ipath_do_ruc_send() in another thread. */
+		tasklet_hi_schedule(&qp->s_task);
+
+		goto done;
 
 	case OP(COMPARE_SWAP):
 	case OP(FETCH_ADD): {
@@ -1792,7 +1628,7 @@
 			goto nack_acc;
 		/* Perform atomic OP and save result. */
 		sdata = be64_to_cpu(ateth->swap_data);
-		spin_lock(&dev->pending_lock);
+		spin_lock_irq(&dev->pending_lock);
 		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
 		if (opcode == OP(FETCH_ADD))
 			*(u64 *) qp->r_sge.sge.vaddr =
@@ -1800,9 +1636,9 @@
 		else if (qp->r_atomic_data ==
 			 be64_to_cpu(ateth->compare_data))
 			*(u64 *) qp->r_sge.sge.vaddr = sdata;
-		spin_unlock(&dev->pending_lock);
-		atomic_inc(&qp->msn);
-		qp->r_atomic_psn = psn & IPS_PSN_MASK;
+		spin_unlock_irq(&dev->pending_lock);
+		qp->r_msn++;
+		qp->r_atomic_psn = psn & IPATH_PSN_MASK;
 		psn |= 1 << 31;
 		break;
 	}
@@ -1813,44 +1649,39 @@
 	}
 	qp->r_psn++;
 	qp->r_state = opcode;
+	qp->r_nak_state = 0;
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31)) {
 		/*
 		 * Coalesce ACKs unless there is a RDMA READ or
 		 * ATOMIC pending.
 		 */
-		spin_lock(&qp->s_lock);
-		if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
-		    qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
-			qp->s_ack_state = opcode;
-			qp->s_nak_state = 0;
-			qp->s_ack_psn = psn;
-			qp->s_ack_atomic = qp->r_atomic_data;
-			goto resched;
+		if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+			qp->r_ack_state = opcode;
+			qp->r_ack_psn = psn;
 		}
-		spin_unlock(&qp->s_lock);
+		goto send_ack;
 	}
-done:
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-	goto bail;
+	goto done;
 
-resched:
+nack_acc:
 	/*
-	 * Try to send ACK right away but not if ipath_do_rc_send() is
-	 * active.
+	 * A NAK will ACK earlier sends and RDMA writes.
+	 * Don't queue the NAK if a RDMA read, atomic, or NAK
+	 * is pending though.
 	 */
-	if (qp->s_hdrwords == 0 &&
-	    (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
-	     qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
+	if (qp->r_ack_state < OP(COMPARE_SWAP)) {
+		/* XXX Flush WQEs */
+		qp->state = IB_QPS_ERR;
+		qp->r_ack_state = OP(RDMA_WRITE_ONLY);
+		qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+		qp->r_ack_psn = qp->r_psn;
+	}
+send_ack:
+	/* Send ACK right away unless the send tasklet has a pending ACK. */
+	if (qp->s_ack_state == OP(ACKNOWLEDGE))
 		send_rc_ack(qp);
 
-rdmadone:
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
-	/* Call ipath_do_rc_send() in another thread. */
-	tasklet_hi_schedule(&qp->s_task);
-
-bail:
+done:
 	return;
 }
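The psn & (1 << 31) tests in this function rely on the BTH layout:
bth[2] carries the 24-bit PSN in its low bits and the ack-request flag
in bit 31.  A hedged sketch of the unpacking (names illustrative):

	#define PSN_MASK	0x00FFFFFFu	/* low 24 bits: PSN */
	#define ACK_REQ_BIT	(1u << 31)	/* BTH AckReq flag */

	static void unpack_bth2(u32 bth2, u32 *psn, int *ack_req)
	{
		*psn = bth2 & PSN_MASK;
		*ack_req = !!(bth2 & ACK_REQ_BIT);
	}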
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 402126e..89df8f5 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index d38f4f3..772bc59f 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,6 +32,7 @@
  */
 
 #include "ipath_verbs.h"
+#include "ipath_common.h"
 
 /*
  * Convert the AETH RNR timeout code into the number of milliseconds.
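For context: the AETH carries a 5-bit RNR timeout code defined by the
IB spec, and the driver converts it to milliseconds via a 32-entry
lookup table (ib_ipath_rnr_table).  An illustrative fragment -- only
the spec-defined endpoints are filled in, the remaining entries are
omitted here and are not real values:

	/* Illustration only -- not the driver's actual table. */
	static const u32 example_rnr_ms[32] = {
		[0]  = 656,	/* code 0 is the largest: 655.36 ms */
		[1]  = 1,	/* code 1 the smallest: 0.01 ms, rounded up */
		[31] = 492,	/* code 31: 491.52 ms */
		/* intermediate codes grow roughly geometrically */
	};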
@@ -111,20 +113,23 @@
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
- * Called at interrupt level with the QP r_rq.lock held.
+ * Can be called from interrupt level.
  */
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 {
+	unsigned long flags;
 	struct ipath_rq *rq;
 	struct ipath_srq *srq;
 	struct ipath_rwqe *wqe;
-	int ret;
+	int ret = 1;
 
 	if (!qp->ibqp.srq) {
 		rq = &qp->r_rq;
+		spin_lock_irqsave(&rq->lock, flags);
+
 		if (unlikely(rq->tail == rq->head)) {
 			ret = 0;
-			goto bail;
+			goto done;
 		}
 		wqe = get_rwqe_ptr(rq, rq->tail);
 		qp->r_wr_id = wqe->wr_id;
@@ -136,17 +141,16 @@
 		}
 		if (++rq->tail >= rq->size)
 			rq->tail = 0;
-		ret = 1;
-		goto bail;
+		goto done;
 	}
 
 	srq = to_isrq(qp->ibqp.srq);
 	rq = &srq->rq;
-	spin_lock(&rq->lock);
+	spin_lock_irqsave(&rq->lock, flags);
+
 	if (unlikely(rq->tail == rq->head)) {
-		spin_unlock(&rq->lock);
 		ret = 0;
-		goto bail;
+		goto done;
 	}
 	wqe = get_rwqe_ptr(rq, rq->tail);
 	qp->r_wr_id = wqe->wr_id;
@@ -168,18 +172,18 @@
 			n = rq->head - rq->tail;
 		if (n < srq->limit) {
 			srq->limit = 0;
-			spin_unlock(&rq->lock);
+			spin_unlock_irqrestore(&rq->lock, flags);
 			ev.device = qp->ibqp.device;
 			ev.element.srq = qp->ibqp.srq;
 			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
 			srq->ibsrq.event_handler(&ev,
 						 srq->ibsrq.srq_context);
-		} else
-			spin_unlock(&rq->lock);
-	} else
-		spin_unlock(&rq->lock);
-	ret = 1;
+			goto bail;
+		}
+	}
 
+done:
+	spin_unlock_irqrestore(&rq->lock, flags);
 bail:
 	return ret;
 }
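The rq->head/rq->tail logic above is a plain ring buffer: empty when
the tail catches the head, with the consumer index wrapping at
rq->size.  A minimal standalone sketch of the consumer side:

	struct rwqe_ring {
		u32 head;	/* next slot the producer fills */
		u32 tail;	/* next slot the consumer reads */
		u32 size;	/* number of slots */
	};

	/* Return 1 and advance the tail if an entry was available. */
	static int ring_consume(struct rwqe_ring *r)
	{
		if (r->tail == r->head)
			return 0;	/* empty: no RWQE available */
		/* ... the entry at r->tail would be used here ... */
		if (++r->tail >= r->size)
			r->tail = 0;
		return 1;
	}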
@@ -187,7 +191,6 @@
 /**
  * ipath_ruc_loopback - handle UC and RC loopback requests
  * @sqp: the loopback QP
- * @wc: the work completion entry
  *
  * This is called from ipath_do_ruc_send() to
  * forward a WQE addressed to the same HCA.
@@ -196,13 +199,14 @@
  * receive interrupts since this is a connected protocol and all packets
  * will pass through here.
  */
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc)
+static void ipath_ruc_loopback(struct ipath_qp *sqp)
 {
 	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
 	struct ipath_qp *qp;
 	struct ipath_swqe *wqe;
 	struct ipath_sge *sge;
 	unsigned long flags;
+	struct ib_wc wc;
 	u64 sdata;
 
 	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
@@ -233,8 +237,8 @@
 	wqe = get_swqe_ptr(sqp, sqp->s_last);
 	spin_unlock_irqrestore(&sqp->s_lock, flags);
 
-	wc->wc_flags = 0;
-	wc->imm_data = 0;
+	wc.wc_flags = 0;
+	wc.imm_data = 0;
 
 	sqp->s_sge.sge = wqe->sg_list[0];
 	sqp->s_sge.sg_list = wqe->sg_list + 1;
@@ -242,39 +246,34 @@
 	sqp->s_len = wqe->length;
 	switch (wqe->wr.opcode) {
 	case IB_WR_SEND_WITH_IMM:
-		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->imm_data = wqe->wr.imm_data;
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.imm_data = wqe->wr.imm_data;
 		/* FALLTHROUGH */
 	case IB_WR_SEND:
-		spin_lock_irqsave(&qp->r_rq.lock, flags);
 		if (!ipath_get_rwqe(qp, 0)) {
 		rnr_nak:
-			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 			/* Handle RNR NAK */
 			if (qp->ibqp.qp_type == IB_QPT_UC)
 				goto send_comp;
 			if (sqp->s_rnr_retry == 0) {
-				wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
 				goto err;
 			}
 			if (sqp->s_rnr_retry_cnt < 7)
 				sqp->s_rnr_retry--;
 			dev->n_rnr_naks++;
 			sqp->s_rnr_timeout =
-				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
+				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
 			ipath_insert_rnr_queue(sqp);
 			goto done;
 		}
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
-		wc->wc_flags = IB_WC_WITH_IMM;
-		wc->imm_data = wqe->wr.imm_data;
-		spin_lock_irqsave(&qp->r_rq.lock, flags);
+		wc.wc_flags = IB_WC_WITH_IMM;
+		wc.imm_data = wqe->wr.imm_data;
 		if (!ipath_get_rwqe(qp, 1))
 			goto rnr_nak;
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 		/* FALLTHROUGH */
 	case IB_WR_RDMA_WRITE:
 		if (wqe->length == 0)
@@ -284,20 +283,20 @@
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_WRITE))) {
 		acc_err:
-			wc->status = IB_WC_REM_ACCESS_ERR;
+			wc.status = IB_WC_REM_ACCESS_ERR;
 		err:
-			wc->wr_id = wqe->wr.wr_id;
-			wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-			wc->vendor_err = 0;
-			wc->byte_len = 0;
-			wc->qp_num = sqp->ibqp.qp_num;
-			wc->src_qp = sqp->remote_qpn;
-			wc->pkey_index = 0;
-			wc->slid = sqp->remote_ah_attr.dlid;
-			wc->sl = sqp->remote_ah_attr.sl;
-			wc->dlid_path_bits = 0;
-			wc->port_num = 0;
-			ipath_sqerror_qp(sqp, wc);
+			wc.wr_id = wqe->wr.wr_id;
+			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+			wc.vendor_err = 0;
+			wc.byte_len = 0;
+			wc.qp_num = sqp->ibqp.qp_num;
+			wc.src_qp = sqp->remote_qpn;
+			wc.pkey_index = 0;
+			wc.slid = sqp->remote_ah_attr.dlid;
+			wc.sl = sqp->remote_ah_attr.sl;
+			wc.dlid_path_bits = 0;
+			wc.port_num = 0;
+			ipath_sqerror_qp(sqp, &wc);
 			goto done;
 		}
 		break;
@@ -373,22 +372,22 @@
 		goto send_comp;
 
 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
 	else
-		wc->opcode = IB_WC_RECV;
-	wc->wr_id = qp->r_wr_id;
-	wc->status = IB_WC_SUCCESS;
-	wc->vendor_err = 0;
-	wc->byte_len = wqe->length;
-	wc->qp_num = qp->ibqp.qp_num;
-	wc->src_qp = qp->remote_qpn;
+		wc.opcode = IB_WC_RECV;
+	wc.wr_id = qp->r_wr_id;
+	wc.status = IB_WC_SUCCESS;
+	wc.vendor_err = 0;
+	wc.byte_len = wqe->length;
+	wc.qp_num = qp->ibqp.qp_num;
+	wc.src_qp = qp->remote_qpn;
 	/* XXX do we know which pkey matched? Only needed for GSI. */
-	wc->pkey_index = 0;
-	wc->slid = qp->remote_ah_attr.dlid;
-	wc->sl = qp->remote_ah_attr.sl;
-	wc->dlid_path_bits = 0;
+	wc.pkey_index = 0;
+	wc.slid = qp->remote_ah_attr.dlid;
+	wc.sl = qp->remote_ah_attr.sl;
+	wc.dlid_path_bits = 0;
 	/* Signal completion event if the solicited bit is set. */
-	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
+	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       wqe->wr.send_flags & IB_SEND_SOLICITED);
 
 send_comp:
@@ -396,19 +395,19 @@
 
 	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
 	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-		wc->wr_id = wqe->wr.wr_id;
-		wc->status = IB_WC_SUCCESS;
-		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		wc->vendor_err = 0;
-		wc->byte_len = wqe->length;
-		wc->qp_num = sqp->ibqp.qp_num;
-		wc->src_qp = 0;
-		wc->pkey_index = 0;
-		wc->slid = 0;
-		wc->sl = 0;
-		wc->dlid_path_bits = 0;
-		wc->port_num = 0;
-		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), wc, 0);
+		wc.wr_id = wqe->wr.wr_id;
+		wc.status = IB_WC_SUCCESS;
+		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+		wc.vendor_err = 0;
+		wc.byte_len = wqe->length;
+		wc.qp_num = sqp->ibqp.qp_num;
+		wc.src_qp = 0;
+		wc.pkey_index = 0;
+		wc.slid = 0;
+		wc.sl = 0;
+		wc.dlid_path_bits = 0;
+		wc.port_num = 0;
+		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
 	}
 
 	/* Update s_last now that we are finished with the SWQE */
@@ -454,11 +453,11 @@
 }
 
 /**
- * ipath_post_rc_send - post RC and UC sends
+ * ipath_post_ruc_send - post RC and UC sends
  * @qp: the QP to post on
  * @wr: the work request to send
  */
-int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
+int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 {
 	struct ipath_swqe *wqe;
 	unsigned long flags;
@@ -533,13 +532,149 @@
 	qp->s_head = next;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-	if (qp->ibqp.qp_type == IB_QPT_UC)
-		ipath_do_uc_send((unsigned long) qp);
-	else
-		ipath_do_rc_send((unsigned long) qp);
+	ipath_do_ruc_send((unsigned long) qp);
 
 	ret = 0;
 
 bail:
 	return ret;
 }
+
+/**
+ * ipath_make_grh - construct a GRH header
+ * @dev: a pointer to the ipath device
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
+		   struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+	hdr->version_tclass_flow =
+		cpu_to_be32((6 << 28) |
+			    (grh->traffic_class << 20) |
+			    grh->flow_label);
+	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
+	hdr->next_hdr = 0x1B;
+	hdr->hop_limit = grh->hop_limit;
+	/* The SGID is 32-bit aligned. */
+	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
+	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
+	hdr->dgid = grh->dgid;
+
+	/* GRH header size in 32-bit words. */
+	return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+/**
+ * ipath_do_ruc_send - perform a send on an RC or UC QP
+ * @data: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until the send credit or the
+ * queue is exhausted.  Only allow one CPU to send a packet per QP
+ * (tasklet); otherwise, after we drop the QP s_lock, two threads
+ * could send packets out of order.
+ */
+void ipath_do_ruc_send(unsigned long data)
+{
+	struct ipath_qp *qp = (struct ipath_qp *)data;
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	unsigned long flags;
+	u16 lrh0;
+	u32 nwords;
+	u32 extra_bytes;
+	u32 bth0;
+	u32 bth2;
+	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+	struct ipath_other_headers *ohdr;
+
+	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
+		goto bail;
+
+	if (unlikely(qp->remote_ah_attr.dlid ==
+		     ipath_layer_get_lid(dev->dd))) {
+		ipath_ruc_loopback(qp);
+		goto clear;
+	}
+
+	ohdr = &qp->s_hdr.u.oth;
+	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+		ohdr = &qp->s_hdr.u.l.oth;
+
+again:
+	/* Check for a constructed packet to be sent. */
+	if (qp->s_hdrwords != 0) {
+		/*
+		 * If no PIO bufs are available, return.  An interrupt will
+		 * call ipath_ib_piobufavail() when one is available.
+		 */
+		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
+				     (u32 *) &qp->s_hdr, qp->s_cur_size,
+				     qp->s_cur_sge)) {
+			ipath_no_bufs_available(qp, dev);
+			goto bail;
+		}
+		dev->n_unicast_xmit++;
+		/* Record that we sent the packet and s_hdr is empty. */
+		qp->s_hdrwords = 0;
+	}
+
+	/*
+	 * The lock is needed to synchronize setting qp->s_ack_state
+	 * with the resend timer and post_send().
+	 */
+	spin_lock_irqsave(&qp->s_lock, flags);
+
+	/* Sending responses has higher priority over sending requests. */
+	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
+	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
+		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
+	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
+		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
+		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
+		/*
+		 * Clear the busy bit before unlocking to avoid races with
+		 * adding new work queue items and then failing to process
+		 * them.
+		 */
+		clear_bit(IPATH_S_BUSY, &qp->s_flags);
+		spin_unlock_irqrestore(&qp->s_lock, flags);
+		goto bail;
+	}
+
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	/* Construct the header. */
+	extra_bytes = (4 - qp->s_cur_size) & 3;
+	nwords = (qp->s_cur_size + extra_bytes) >> 2;
+	lrh0 = IPATH_LRH_BTH;
+	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
+						 &qp->remote_ah_attr.grh,
+						 qp->s_hdrwords, nwords);
+		lrh0 = IPATH_LRH_GRH;
+	}
+	lrh0 |= qp->remote_ah_attr.sl << 4;
+	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
+				       SIZE_OF_CRC);
+	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	bth0 |= extra_bytes << 20;
+	ohdr->bth[0] = cpu_to_be32(bth0);
+	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+	ohdr->bth[2] = cpu_to_be32(bth2);
+
+	/* Check for more work to do. */
+	goto again;
+
+clear:
+	clear_bit(IPATH_S_BUSY, &qp->s_flags);
+bail:
+	return;
+}
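A worked example of the padding math in ipath_do_ruc_send() above: IB
payloads travel in 32-bit words, so the pad is computed first and the
word count from the padded size; the two-bit pad count is what lands in
bits 20-21 of bth[0] via extra_bytes << 20.

	static u32 padded_words(u32 size, u32 *extra_bytes)
	{
		*extra_bytes = (4 - size) & 3;	   /* 13 bytes -> 3 pad bytes */
		return (size + *extra_bytes) >> 2; /* 13 bytes -> 4 words */
	}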
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 01c4c6c..f760434 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -125,11 +126,23 @@
 				struct ib_srq_init_attr *srq_init_attr,
 				struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibpd->device);
 	struct ipath_srq *srq;
 	u32 sz;
 	struct ib_srq *ret;
 
-	if (srq_init_attr->attr.max_sge < 1) {
+	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	if (srq_init_attr->attr.max_wr == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
+	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
@@ -164,6 +177,8 @@
 
 	ret = &srq->ibsrq;
 
+	dev->n_srqs_allocated++;
+
 bail:
 	return ret;
 }
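The n_srqs_allocated accounting above follows the same advisory-limit
pattern this patch applies to PDs and AHs: check the counter against
the module parameter before creating, bump it on success, and drop it
on destroy.  A sketch of the pattern (types hypothetical; like the
driver, the check is unlocked and therefore best-effort):

	struct obj_limit {
		u32 n_allocated;	/* current count */
		u32 max_allowed;	/* from the module parameter */
	};

	/* Return 0 on success, -ENOMEM once the advisory cap is reached. */
	static int limit_take(struct obj_limit *l)
	{
		if (l->n_allocated == l->max_allowed)
			return -ENOMEM;
		l->n_allocated++;	/* paired with a decrement on destroy */
		return 0;
	}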
@@ -181,24 +196,26 @@
 	unsigned long flags;
 	int ret;
 
-	if (attr_mask & IB_SRQ_LIMIT) {
-		spin_lock_irqsave(&srq->rq.lock, flags);
-		srq->limit = attr->srq_limit;
-		spin_unlock_irqrestore(&srq->rq.lock, flags);
-	}
-	if (attr_mask & IB_SRQ_MAX_WR) {
-		u32 size = attr->max_wr + 1;
-		struct ipath_rwqe *wq, *p;
-		u32 n;
-		u32 sz;
-
-		if (attr->max_sge < srq->rq.max_sge) {
+	if (attr_mask & IB_SRQ_MAX_WR)
+		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
+		    (attr->max_sge > srq->rq.max_sge)) {
 			ret = -EINVAL;
 			goto bail;
 		}
 
+	if (attr_mask & IB_SRQ_LIMIT)
+		if (attr->srq_limit >= srq->rq.size) {
+			ret = -EINVAL;
+			goto bail;
+		}
+
+	if (attr_mask & IB_SRQ_MAX_WR) {
+		struct ipath_rwqe *wq, *p;
+		u32 sz, size, n;
+
 		sz = sizeof(struct ipath_rwqe) +
 			attr->max_sge * sizeof(struct ipath_sge);
+		size = attr->max_wr + 1;
 		wq = vmalloc(size * sz);
 		if (!wq) {
 			ret = -ENOMEM;
@@ -242,6 +259,11 @@
 		spin_unlock_irqrestore(&srq->rq.lock, flags);
 	}
 
+	if (attr_mask & IB_SRQ_LIMIT) {
+		spin_lock_irqsave(&srq->rq.lock, flags);
+		srq->limit = attr->srq_limit;
+		spin_unlock_irqrestore(&srq->rq.lock, flags);
+	}
 	ret = 0;
 
 bail:
@@ -265,7 +287,9 @@
 int ipath_destroy_srq(struct ib_srq *ibsrq)
 {
 	struct ipath_srq *srq = to_isrq(ibsrq);
+	struct ipath_ibdev *dev = to_idev(ibsrq->device);
 
+	dev->n_srqs_allocated--;
 	vfree(srq->rq.wq);
 	kfree(srq);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index fe20913..70351b7 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -185,7 +186,6 @@
 				   dd->ipath_port0head,
 				   (unsigned long long)
 				   ipath_stats.sps_port0pkts);
-			ipath_kreceive(dd);
 		}
 		dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index f323791..b98821d 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,8 +35,8 @@
 #include <linux/pci.h>
 
 #include "ipath_kernel.h"
-#include "ips_common.h"
 #include "ipath_layer.h"
+#include "ipath_common.h"
 
 /**
  * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
@@ -84,99 +85,6 @@
 			 ipath_count_units(NULL, NULL, NULL));
 }
 
-#define DRIVER_STAT(name, attr) \
-	static ssize_t show_stat_##name(struct device_driver *dev, \
-					char *buf) \
-	{ \
-		return scnprintf( \
-			buf, PAGE_SIZE, "%llu\n", \
-			(unsigned long long) ipath_stats.sps_ ##attr); \
-	} \
-	static DRIVER_ATTR(name, S_IRUGO, show_stat_##name, NULL)
-
-DRIVER_STAT(intrs, ints);
-DRIVER_STAT(err_intrs, errints);
-DRIVER_STAT(errs, errs);
-DRIVER_STAT(pkt_errs, pkterrs);
-DRIVER_STAT(crc_errs, crcerrs);
-DRIVER_STAT(hw_errs, hwerrs);
-DRIVER_STAT(ib_link, iblink);
-DRIVER_STAT(port0_pkts, port0pkts);
-DRIVER_STAT(ether_spkts, ether_spkts);
-DRIVER_STAT(ether_rpkts, ether_rpkts);
-DRIVER_STAT(sma_spkts, sma_spkts);
-DRIVER_STAT(sma_rpkts, sma_rpkts);
-DRIVER_STAT(hdrq_full, hdrqfull);
-DRIVER_STAT(etid_full, etidfull);
-DRIVER_STAT(no_piobufs, nopiobufs);
-DRIVER_STAT(ports, ports);
-DRIVER_STAT(pkey0, pkeys[0]);
-DRIVER_STAT(pkey1, pkeys[1]);
-DRIVER_STAT(pkey2, pkeys[2]);
-DRIVER_STAT(pkey3, pkeys[3]);
-/* XXX fix the following when dynamic table of devices used */
-DRIVER_STAT(lid0, lid[0]);
-DRIVER_STAT(lid1, lid[1]);
-DRIVER_STAT(lid2, lid[2]);
-DRIVER_STAT(lid3, lid[3]);
-
-DRIVER_STAT(nports, nports);
-DRIVER_STAT(null_intr, nullintr);
-DRIVER_STAT(max_pkts_call, maxpkts_call);
-DRIVER_STAT(avg_pkts_call, avgpkts_call);
-DRIVER_STAT(page_locks, pagelocks);
-DRIVER_STAT(page_unlocks, pageunlocks);
-DRIVER_STAT(krdrops, krdrops);
-/* XXX fix the following when dynamic table of devices used */
-DRIVER_STAT(mlid0, mlid[0]);
-DRIVER_STAT(mlid1, mlid[1]);
-DRIVER_STAT(mlid2, mlid[2]);
-DRIVER_STAT(mlid3, mlid[3]);
-
-static struct attribute *driver_stat_attributes[] = {
-	&driver_attr_intrs.attr,
-	&driver_attr_err_intrs.attr,
-	&driver_attr_errs.attr,
-	&driver_attr_pkt_errs.attr,
-	&driver_attr_crc_errs.attr,
-	&driver_attr_hw_errs.attr,
-	&driver_attr_ib_link.attr,
-	&driver_attr_port0_pkts.attr,
-	&driver_attr_ether_spkts.attr,
-	&driver_attr_ether_rpkts.attr,
-	&driver_attr_sma_spkts.attr,
-	&driver_attr_sma_rpkts.attr,
-	&driver_attr_hdrq_full.attr,
-	&driver_attr_etid_full.attr,
-	&driver_attr_no_piobufs.attr,
-	&driver_attr_ports.attr,
-	&driver_attr_pkey0.attr,
-	&driver_attr_pkey1.attr,
-	&driver_attr_pkey2.attr,
-	&driver_attr_pkey3.attr,
-	&driver_attr_lid0.attr,
-	&driver_attr_lid1.attr,
-	&driver_attr_lid2.attr,
-	&driver_attr_lid3.attr,
-	&driver_attr_nports.attr,
-	&driver_attr_null_intr.attr,
-	&driver_attr_max_pkts_call.attr,
-	&driver_attr_avg_pkts_call.attr,
-	&driver_attr_page_locks.attr,
-	&driver_attr_page_unlocks.attr,
-	&driver_attr_krdrops.attr,
-	&driver_attr_mlid0.attr,
-	&driver_attr_mlid1.attr,
-	&driver_attr_mlid2.attr,
-	&driver_attr_mlid3.attr,
-	NULL
-};
-
-static struct attribute_group driver_stat_attr_group = {
-	.name = "stats",
-	.attrs = driver_stat_attributes
-};
-
 static ssize_t show_status(struct device *dev,
 			   struct device_attribute *attr,
 			   char *buf)
@@ -272,23 +180,23 @@
 			  size_t count)
 {
 	struct ipath_devdata *dd = dev_get_drvdata(dev);
-	u16 lid;
+	u16 lid = 0;
 	int ret;
 
 	ret = ipath_parse_ushort(buf, &lid);
 	if (ret < 0)
 		goto invalid;
 
-	if (lid == 0 || lid >= 0xc000) {
+	if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
 		ret = -EINVAL;
 		goto invalid;
 	}
 
-	ipath_set_sps_lid(dd, lid, 0);
+	ipath_set_lid(dd, lid, 0);
 
 	goto bail;
 invalid:
-	ipath_dev_err(dd, "attempt to set invalid LID\n");
+	ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
 bail:
 	return ret;
 }
@@ -313,13 +221,12 @@
 	int ret;
 
 	ret = ipath_parse_ushort(buf, &mlid);
-	if (ret < 0)
+	if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
 		goto invalid;
 
 	unit = dd->ipath_unit;
 
 	dd->ipath_mlid = mlid;
-	ipath_stats.sps_mlid[unit] = mlid;
 	ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
 
 	goto bail;
@@ -734,20 +641,12 @@
 	int ret;
 
 	ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
-	if (ret)
-		goto bail;
 
-	ret = sysfs_create_group(&drv->kobj, &driver_stat_attr_group);
-	if (ret)
-		sysfs_remove_group(&drv->kobj, &driver_attr_group);
-
-bail:
 	return ret;
 }
 
 void ipath_driver_remove_group(struct device_driver *drv)
 {
-	sysfs_remove_group(&drv->kobj, &driver_stat_attr_group);
 	sysfs_remove_group(&drv->kobj, &driver_attr_group);
 }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 0d6dbc0..c33abea 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -31,7 +32,7 @@
  */
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /* cut down ridiculously long IB macro names */
 #define OP(x) IB_OPCODE_UC_##x
@@ -61,90 +62,40 @@
 }
 
 /**
- * ipath_do_uc_send - do a send on a UC queue
- * @data: contains a pointer to the QP to send on
+ * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ * @bth0p: pointer to the BTH opcode word
+ * @bth2p: pointer to the BTH PSN word
  *
- * Process entries in the send work queue until the queue is exhausted.
- * Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP lock, two threads could send
- * packets out of order.
- * This is similar to ipath_do_rc_send() below except we don't have
- * timeouts or resends.
+ * Return 1 if constructed; otherwise, return 0.
+ * Note the QP s_lock must be held and interrupts disabled.
  */
-void ipath_do_uc_send(unsigned long data)
+int ipath_make_uc_req(struct ipath_qp *qp,
+		      struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
-	struct ipath_qp *qp = (struct ipath_qp *)data;
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ipath_swqe *wqe;
-	unsigned long flags;
-	u16 lrh0;
 	u32 hwords;
-	u32 nwords;
-	u32 extra_bytes;
 	u32 bth0;
-	u32 bth2;
-	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	u32 len;
-	struct ipath_other_headers *ohdr;
 	struct ib_wc wc;
 
-	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-		goto bail;
-
-	if (unlikely(qp->remote_ah_attr.dlid ==
-		     ipath_layer_get_lid(dev->dd))) {
-		/* Pass in an uninitialized ib_wc to save stack space. */
-		ipath_ruc_loopback(qp, &wc);
-		clear_bit(IPATH_S_BUSY, &qp->s_flags);
-		goto bail;
-	}
-
-	ohdr = &qp->s_hdr.u.oth;
-	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-	/* Check for a constructed packet to be sent. */
-	if (qp->s_hdrwords != 0) {
-			/*
-			 * If no PIO bufs are available, return.
-			 * An interrupt will call ipath_ib_piobufavail()
-			 * when one is available.
-			 */
-			if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-					     (u32 *) &qp->s_hdr,
-					     qp->s_cur_size,
-					     qp->s_cur_sge)) {
-				ipath_no_bufs_available(qp, dev);
-				goto bail;
-			}
-			dev->n_unicast_xmit++;
-		/* Record that we sent the packet and s_hdr is empty. */
-		qp->s_hdrwords = 0;
-	}
-
-	lrh0 = IPS_LRH_BTH;
-	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
-	hwords = 5;
-
-	/*
-	 * The lock is needed to synchronize between
-	 * setting qp->s_ack_state and post_send().
-	 */
-	spin_lock_irqsave(&qp->s_lock, flags);
-
 	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
 		goto done;
 
-	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
+	hwords = 5;
+	bth0 = 0;
 
-	/* Send a request. */
+	/* Get the next send request. */
 	wqe = get_swqe_ptr(qp, qp->s_last);
 	switch (qp->s_state) {
 	default:
 		/*
-		 * Signal the completion of the last send (if there is
-		 * one).
+		 * Signal the completion of the last send
+		 * (if there is one).
 		 */
 		if (qp->s_last != qp->s_tail)
 			complete_last_send(qp, wqe, &wc);
@@ -257,61 +208,16 @@
 		}
 		break;
 	}
-	bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
 	qp->s_len -= len;
-	bth0 |= qp->s_state << 24;
-
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-
-	/* Construct the header. */
-	extra_bytes = (4 - len) & 3;
-	nwords = (len + extra_bytes) >> 2;
-	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		/* Header size in 32-bit words. */
-		hwords += 10;
-		lrh0 = IPS_LRH_GRH;
-		qp->s_hdr.u.l.grh.version_tclass_flow =
-			cpu_to_be32((6 << 28) |
-				    (qp->remote_ah_attr.grh.traffic_class
-				     << 20) |
-				    qp->remote_ah_attr.grh.flow_label);
-		qp->s_hdr.u.l.grh.paylen =
-			cpu_to_be16(((hwords - 12) + nwords +
-				     SIZE_OF_CRC) << 2);
-		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
-		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-		qp->s_hdr.u.l.grh.hop_limit =
-			qp->remote_ah_attr.grh.hop_limit;
-		/* The SGID is 32-bit aligned. */
-		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
-			dev->gid_prefix;
-		qp->s_hdr.u.l.grh.sgid.global.interface_id =
-			ipath_layer_get_guid(dev->dd);
-		qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
-	}
 	qp->s_hdrwords = hwords;
 	qp->s_cur_sge = &qp->s_sge;
 	qp->s_cur_size = len;
-	lrh0 |= qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	/* DEST LID */
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-	bth0 |= extra_bytes << 20;
-	ohdr->bth[0] = cpu_to_be32(bth0);
-	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-	ohdr->bth[2] = cpu_to_be32(bth2);
-
-	/* Check for more work to do. */
-	goto again;
+	*bth0p = bth0 | (qp->s_state << 24);
+	*bth2p = qp->s_next_psn++ & IPATH_PSN_MASK;
+	return 1;
 
 done:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	clear_bit(IPATH_S_BUSY, &qp->s_flags);
-
-bail:
-	return;
+	return 0;
 }
 
 /**
@@ -335,7 +241,6 @@
 	u32 hdrsize;
 	u32 psn;
 	u32 pad;
-	unsigned long flags;
 	struct ib_wc wc;
 	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
 	struct ib_reth *reth;
@@ -373,8 +278,6 @@
 	wc.imm_data = 0;
 	wc.wc_flags = 0;
 
-	spin_lock_irqsave(&qp->r_rq.lock, flags);
-
 	/* Compare the PSN verses the expected PSN. */
 	if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
 		/*
@@ -535,12 +438,13 @@
 		if (qp->r_len != 0) {
 			u32 rkey = be32_to_cpu(reth->rkey);
 			u64 vaddr = be64_to_cpu(reth->vaddr);
+			int ok;
 
 			/* Check rkey */
-			if (unlikely(!ipath_rkey_ok(
-					     dev, &qp->r_sge, qp->r_len,
-					     vaddr, rkey,
-					     IB_ACCESS_REMOTE_WRITE))) {
+			ok = ipath_rkey_ok(dev, &qp->r_sge, qp->r_len,
+					   vaddr, rkey,
+					   IB_ACCESS_REMOTE_WRITE);
+			if (unlikely(!ok)) {
 				dev->n_pkt_drops++;
 				goto done;
 			}
@@ -558,8 +462,7 @@
 		}
 		if (opcode == OP(RDMA_WRITE_ONLY))
 			goto rdma_last;
-		else if (opcode ==
-			 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
 			goto rdma_last_imm;
 		/* FALLTHROUGH */
 	case OP(RDMA_WRITE_MIDDLE):
@@ -592,9 +495,9 @@
 			dev->n_pkt_drops++;
 			goto done;
 		}
-		if (qp->r_reuse_sge) {
+		if (qp->r_reuse_sge)
 			qp->r_reuse_sge = 0;
-		} else if (!ipath_get_rwqe(qp, 1)) {
+		else if (!ipath_get_rwqe(qp, 1)) {
 			dev->n_pkt_drops++;
 			goto done;
 		}
@@ -631,15 +534,11 @@
 
 	default:
 		/* Drop packet for unknown opcodes. */
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 		dev->n_pkt_drops++;
-		goto bail;
+		goto done;
 	}
 	qp->r_psn++;
 	qp->r_state = opcode;
 done:
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-
-bail:
 	return;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index e606daf..3466129 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -33,7 +34,7 @@
 #include <rdma/ib_smi.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /**
  * ipath_ud_loopback - handle send on loopback QPs
@@ -274,6 +275,11 @@
 		len += wr->sg_list[i].length;
 		ss.num_sge++;
 	}
+	/* Check for invalid packet size. */
+	if (len > ipath_layer_get_ibmtu(dev->dd)) {
+		ret = -EINVAL;
+		goto bail;
+	}
 	extra_bytes = (4 - len) & 3;
 	nwords = (len + extra_bytes) >> 2;
 
@@ -283,8 +289,8 @@
 		ret = -EINVAL;
 		goto bail;
 	}
-	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
-		if (ah_attr->dlid != IPS_PERMISSIVE_LID)
+	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
+		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
 			dev->n_multicast_xmit++;
 		else
 			dev->n_unicast_xmit++;
@@ -304,7 +310,7 @@
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		/* Header size in 32-bit words. */
 		hwords = 17;
-		lrh0 = IPS_LRH_GRH;
+		lrh0 = IPATH_LRH_GRH;
 		ohdr = &qp->s_hdr.u.l.oth;
 		qp->s_hdr.u.l.grh.version_tclass_flow =
 			cpu_to_be32((6 << 28) |
@@ -330,7 +336,7 @@
 	} else {
 		/* Header size in 32-bit words. */
 		hwords = 7;
-		lrh0 = IPS_LRH_BTH;
+		lrh0 = IPATH_LRH_BTH;
 		ohdr = &qp->s_hdr.u.oth;
 	}
 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
@@ -361,18 +367,18 @@
 	if (wr->send_flags & IB_SEND_SOLICITED)
 		bth0 |= 1 << 23;
 	bth0 |= extra_bytes << 20;
-	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
+	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
 		ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	/*
 	 * Use the multicast QP if the destination LID is a multicast LID.
 	 */
-	ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
-		ah_attr->dlid != IPS_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPS_MULTICAST_QPN) :
+	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
+		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
+		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wr->wr.ud.remote_qpn);
 	/* XXX Could lose a PSN count but not worth locking */
-	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
+	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
 	 * Qkeys with the high order bit set mean use the
 	 * qkey from the QP context instead of the WR (see 10.2.5).
@@ -463,7 +469,7 @@
 			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
 		}
 	}
-	src_qp &= IPS_QPN_MASK;
+	src_qp &= IPATH_QPN_MASK;
 
 	/*
 	 * Check that the permissive LID is only used on QP0
@@ -554,7 +560,16 @@
 	spin_lock_irqsave(&rq->lock, flags);
 	if (rq->tail == rq->head) {
 		spin_unlock_irqrestore(&rq->lock, flags);
-		dev->n_pkt_drops++;
+		/*
+		 * Count VL15 packets dropped due to no receive buffer.
+		 * Otherwise, count them as buffer overruns, since the HW
+		 * can usually receive packets even if there are no QPs
+		 * with posted receive buffers.
+		 */
+		if (qp->ibqp.qp_num == 0)
+			dev->n_vl15_dropped++;
+		else
+			dev->rcv_errors++;
 		goto bail;
 	}
 	/* Silently drop packets which are too big. */
@@ -612,7 +627,7 @@
 	/*
 	 * Save the LMC lower bits if the destination LID is a unicast LID.
 	 */
-	wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
+	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
 		dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
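On the dlid_path_bits computation above: a port with LMC = n responds
to 2^n consecutive LIDs, and the low n bits of a unicast DLID select
the path.  A small illustration (values hypothetical):

	static u16 lmc_path_bits(u16 dlid, u8 lmc)
	{
		/* e.g. dlid 0x1003 with lmc 2 -> path bits 0x3 */
		return dlid & ((1 << lmc) - 1);
	}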
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 2bb08af..e32fca9 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -57,17 +58,6 @@
 	size_t got;
 	int ret;
 
-#if 0
-	/*
-	 * XXX - causes MPI programs to fail, haven't had time to check
-	 * yet
-	 */
-	if (!capable(CAP_IPC_LOCK)) {
-		ret = -EPERM;
-		goto bail;
-	}
-#endif
-
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
 		PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 28fdbda..56ac336 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -36,7 +37,7 @@
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /* Not static, because we don't want the compiler removing it */
 const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
@@ -55,9 +56,62 @@
 module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "Verbs debug mask");
 
+static unsigned int ib_ipath_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+		 "Maximum number of protection domains to support");
+
+static unsigned int ib_ipath_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_ipath_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+		 "Maximum number of completion queue entries to support");
+
+unsigned int ib_ipath_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
+		   S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_ipath_max_sges = 0x60;
+module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_ipath_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
+		   S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+		 "Maximum number of multicast groups to support");
+
+unsigned int ib_ipath_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+		 "Maximum number of attached QPs to support");
+
+unsigned int ib_ipath_max_srqs = 1024;
+module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_ipath_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("PathScale <support@pathscale.com>");
-MODULE_DESCRIPTION("Pathscale InfiniPath driver");
+MODULE_AUTHOR("QLogic <support@pathscale.com>");
+MODULE_DESCRIPTION("QLogic InfiniPath driver");
 
 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = 0,
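The new limits above are ordinary module parameters; with
S_IWUSR | S_IRUGO they also appear under /sys/module/<module>/parameters/
for inspection and root-writable tuning, and can be set at load time,
e.g. "modprobe ib_ipath max_srq_wrs=65535" (value illustrative, and
assuming the verbs code builds as the ib_ipath module).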
@@ -193,7 +247,7 @@
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_UC:
 		case IB_QPT_RC:
-			err = ipath_post_rc_send(qp, wr);
+			err = ipath_post_ruc_send(qp, wr);
 			break;
 
 		case IB_QPT_SMI:
@@ -375,7 +429,7 @@
 
 	/* Check for a valid destination LID (see ch. 7.11.1). */
 	lid = be16_to_cpu(hdr->lrh[1]);
-	if (lid < IPS_MULTICAST_LID_BASE) {
+	if (lid < IPATH_MULTICAST_LID_BASE) {
 		lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
 		if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
 			dev->rcv_errors++;
@@ -385,9 +439,9 @@
 
 	/* Check for GRH */
 	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
-	if (lnh == IPS_LRH_BTH)
+	if (lnh == IPATH_LRH_BTH)
 		ohdr = &hdr->u.oth;
-	else if (lnh == IPS_LRH_GRH)
+	else if (lnh == IPATH_LRH_GRH)
 		ohdr = &hdr->u.l.oth;
 	else {
 		dev->rcv_errors++;
@@ -399,8 +453,8 @@
 	dev->opstats[opcode].n_packets++;
 
 	/* Get the destination QP number. */
-	qp_num = be32_to_cpu(ohdr->bth[1]) & IPS_QPN_MASK;
-	if (qp_num == IPS_MULTICAST_QPN) {
+	qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
+	if (qp_num == IPATH_MULTICAST_QPN) {
 		struct ipath_mcast *mcast;
 		struct ipath_mcast_qp *p;
 
@@ -411,7 +465,7 @@
 		}
 		dev->n_multicast_rcv++;
 		list_for_each_entry_rcu(p, &mcast->qp_list, list)
-			ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
+			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
 				     tlen, p->qp);
 		/*
 		 * Notify ipath_multicast_detach() if it is waiting for us
@@ -423,7 +477,7 @@
 		qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
 		if (qp) {
 			dev->n_unicast_rcv++;
-			ipath_qp_rcv(dev, hdr, lnh == IPS_LRH_GRH, data,
+			ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
 				     tlen, qp);
 			/*
 			 * Notify ipath_destroy_qp() if it is waiting
@@ -567,40 +621,38 @@
 			      struct ib_device_attr *props)
 {
 	struct ipath_ibdev *dev = to_idev(ibdev);
-	u32 vendor, boardrev, majrev, minrev;
 
 	memset(props, 0, sizeof(*props));
 
 	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
 		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
 		IB_DEVICE_SYS_IMAGE_GUID;
-	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
-				 &majrev, &minrev);
-	props->vendor_id = vendor;
-	props->vendor_part_id = boardrev;
-	props->hw_ver = boardrev << 16 | majrev << 8 | minrev;
+	props->vendor_id = ipath_layer_get_vendorid(dev->dd);
+	props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
+	props->hw_ver = ipath_layer_get_pcirev(dev->dd);
 
 	props->sys_image_guid = dev->sys_image_guid;
 
 	props->max_mr_size = ~0ull;
-	props->max_qp = 0xffff;
-	props->max_qp_wr = 0xffff;
-	props->max_sge = 255;
-	props->max_cq = 0xffff;
-	props->max_cqe = 0xffff;
-	props->max_mr = 0xffff;
-	props->max_pd = 0xffff;
+	props->max_qp = dev->qp_table.max;
+	props->max_qp_wr = ib_ipath_max_qp_wrs;
+	props->max_sge = ib_ipath_max_sges;
+	props->max_cq = ib_ipath_max_cqs;
+	props->max_ah = ib_ipath_max_ahs;
+	props->max_cqe = ib_ipath_max_cqes;
+	props->max_mr = dev->lk_table.max;
+	props->max_pd = ib_ipath_max_pds;
 	props->max_qp_rd_atom = 1;
 	props->max_qp_init_rd_atom = 1;
 	/* props->max_res_rd_atom */
-	props->max_srq = 0xffff;
-	props->max_srq_wr = 0xffff;
-	props->max_srq_sge = 255;
+	props->max_srq = ib_ipath_max_srqs;
+	props->max_srq_wr = ib_ipath_max_srq_wrs;
+	props->max_srq_sge = ib_ipath_max_srq_sges;
 	/* props->local_ca_ack_delay */
 	props->atomic_cap = IB_ATOMIC_HCA;
 	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
-	props->max_mcast_grp = 0xffff;
-	props->max_mcast_qp_attach = 0xffff;
+	props->max_mcast_grp = ib_ipath_max_mcast_grps;
+	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 		props->max_mcast_grp;
 
@@ -643,10 +695,10 @@
 		ipath_layer_get_lastibcstat(dev->dd) & 0xf];
 	props->port_cap_flags = dev->port_cap_flags;
 	props->gid_tbl_len = 1;
-	props->max_msg_sz = 4096;
+	props->max_msg_sz = 0x80000000;
 	props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
 	props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
-		dev->n_pkey_violations;
+		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
 	props->active_width = IB_WIDTH_4X;
 	/* See rate_show() */
@@ -743,15 +795,30 @@
 				    struct ib_ucontext *context,
 				    struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibdev);
 	struct ipath_pd *pd;
 	struct ib_pd *ret;
 
+	/*
+	 * This is actually totally arbitrary.  Some correctness tests
+	 * assume there's a maximum number of PDs that can be allocated,
+	 * so although the hardware imposes no such limit, we refuse
+	 * allocations beyond the value we report here.
+	 */
+
+	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	pd = kmalloc(sizeof *pd, GFP_KERNEL);
 	if (!pd) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	dev->n_pds_allocated++;
+
 	/* ib_alloc_pd() will initialize pd->ibpd. */
 	pd->user = udata != NULL;
 
@@ -764,6 +831,9 @@
 static int ipath_dealloc_pd(struct ib_pd *ibpd)
 {
 	struct ipath_pd *pd = to_ipd(ibpd);
+	struct ipath_ibdev *dev = to_idev(ibpd->device);
+
+	dev->n_pds_allocated--;
 
 	kfree(pd);
 
@@ -782,21 +852,40 @@
 {
 	struct ipath_ah *ah;
 	struct ib_ah *ret;
+	struct ipath_ibdev *dev = to_idev(pd->device);
+
+	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
 
 	/* A multicast address requires a GRH (see ch. 8.4.1). */
-	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
-	    ah_attr->dlid != IPS_PERMISSIVE_LID &&
+	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
+	    ah_attr->dlid != IPATH_PERMISSIVE_LID &&
 	    !(ah_attr->ah_flags & IB_AH_GRH)) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
 
+	if (ah_attr->dlid == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	if (ah_attr->port_num < 1 ||
+	    ah_attr->port_num > pd->device->phys_port_cnt) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
 	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
 	if (!ah) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	dev->n_ahs_allocated++;
+
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
 
@@ -814,8 +903,11 @@
  */
 static int ipath_destroy_ah(struct ib_ah *ibah)
 {
+	struct ipath_ibdev *dev = to_idev(ibah->device);
 	struct ipath_ah *ah = to_iah(ibah);
 
+	dev->n_ahs_allocated--;
+
 	kfree(ah);
 
 	return 0;
@@ -889,6 +981,7 @@
  */
 static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 {
+	struct ipath_layer_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
 	int ret;
@@ -939,6 +1032,25 @@
 	idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT;
 	idev->link_width_enabled = 3;	/* 1x or 4x */
 
+	/* Snapshot current HW counters to "clear" them. */
+	ipath_layer_get_counters(dd, &cntrs);
+	idev->z_symbol_error_counter = cntrs.symbol_error_counter;
+	idev->z_link_error_recovery_counter =
+		cntrs.link_error_recovery_counter;
+	idev->z_link_downed_counter = cntrs.link_downed_counter;
+	idev->z_port_rcv_errors = cntrs.port_rcv_errors;
+	idev->z_port_rcv_remphys_errors =
+		cntrs.port_rcv_remphys_errors;
+	idev->z_port_xmit_discards = cntrs.port_xmit_discards;
+	idev->z_port_xmit_data = cntrs.port_xmit_data;
+	idev->z_port_rcv_data = cntrs.port_rcv_data;
+	idev->z_port_xmit_packets = cntrs.port_xmit_packets;
+	idev->z_port_rcv_packets = cntrs.port_rcv_packets;
+	idev->z_local_link_integrity_errors =
+		cntrs.local_link_integrity_errors;
+	idev->z_excessive_buffer_overrun_errors =
+		cntrs.excessive_buffer_overrun_errors;
+
 	/*
 	 * The system image GUID is supposed to be the same for all
 	 * IB HCAs in a single system but since there can be other
@@ -1109,11 +1221,8 @@
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
-	int vendor, boardrev, majrev, minrev;
 
-	ipath_layer_query_device(dev->dd, &vendor, &boardrev,
-				 &majrev, &minrev);
-	return sprintf(buf, "%d.%d\n", majrev, minrev);
+	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 4f8d593..2df6847 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -148,6 +149,7 @@
 	struct list_head qp_list;
 	wait_queue_head_t wait;
 	atomic_t refcount;
+	int n_attached;
 };
 
 /* Memory region */
@@ -305,32 +307,34 @@
 	u32 s_next_psn;		/* PSN for next request */
 	u32 s_last_psn;		/* last response PSN processed */
 	u32 s_psn;		/* current packet sequence number */
+	u32 s_ack_psn;		/* PSN for RDMA_READ */
 	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
-	u32 s_ack_psn;		/* PSN for next ACK or RDMA_READ */
-	u64 s_ack_atomic;	/* data for atomic ACK */
+	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
 	u64 r_wr_id;		/* ID for current receive WQE */
 	u64 r_atomic_data;	/* data for last atomic op */
 	u32 r_atomic_psn;	/* PSN of last atomic op */
 	u32 r_len;		/* total length of r_sge */
 	u32 r_rcv_len;		/* receive data len processed */
 	u32 r_psn;		/* expected rcv packet sequence number */
+	u32 r_msn;		/* message sequence number */
 	u8 state;		/* QP state */
 	u8 s_state;		/* opcode of last packet sent */
 	u8 s_ack_state;		/* opcode of packet to ACK */
 	u8 s_nak_state;		/* non-zero if NAK is pending */
 	u8 r_state;		/* opcode of last packet received */
+	u8 r_ack_state;		/* opcode of packet to ACK */
+	u8 r_nak_state;		/* non-zero if NAK is pending */
+	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
 	u8 r_reuse_sge;		/* for UC receive errors */
 	u8 r_sge_inx;		/* current index into sg_list */
-	u8 s_max_sge;		/* size of s_wq->sg_list */
 	u8 qp_access_flags;
+	u8 s_max_sge;		/* size of s_wq->sg_list */
 	u8 s_retry_cnt;		/* number of times to retry */
 	u8 s_rnr_retry_cnt;
-	u8 s_min_rnr_timer;
 	u8 s_retry;		/* requester retry counter */
 	u8 s_rnr_retry;		/* requester RNR retry counter */
 	u8 s_pkey_index;	/* PKEY index to use */
 	enum ib_mtu path_mtu;
-	atomic_t msn;		/* message sequence number */
 	u32 remote_qpn;
 	u32 qkey;		/* QKEY for this QP (for UD or RD) */
 	u32 s_size;		/* send work queue size */
@@ -431,6 +435,11 @@
 	__be64 sys_image_guid;	/* in network order */
 	__be64 gid_prefix;	/* in network order */
 	__be64 mkey;
+	u32 n_pds_allocated;	/* number of PDs allocated for device */
+	u32 n_ahs_allocated;	/* number of AHs allocated for device */
+	u32 n_cqs_allocated;	/* number of CQs allocated for device */
+	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
+	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
 	u64 ipath_sword;	/* total dwords sent (sample result) */
 	u64 ipath_rword;	/* total dwords received (sample result) */
 	u64 ipath_spkts;	/* total packets sent (sample result) */
@@ -442,17 +451,19 @@
 	u64 n_unicast_rcv;	/* total unicast packets received */
 	u64 n_multicast_xmit;	/* total multicast packets sent */
 	u64 n_multicast_rcv;	/* total multicast packets received */
-	u64 n_symbol_error_counter;	/* starting count for PMA */
-	u64 n_link_error_recovery_counter;	/* starting count for PMA */
-	u64 n_link_downed_counter;	/* starting count for PMA */
-	u64 n_port_rcv_errors;	/* starting count for PMA */
-	u64 n_port_rcv_remphys_errors;	/* starting count for PMA */
-	u64 n_port_xmit_discards;	/* starting count for PMA */
-	u64 n_port_xmit_data;	/* starting count for PMA */
-	u64 n_port_rcv_data;	/* starting count for PMA */
-	u64 n_port_xmit_packets;	/* starting count for PMA */
-	u64 n_port_rcv_packets;	/* starting count for PMA */
-	u32 n_pkey_violations;	/* starting count for PMA */
+	u64 z_symbol_error_counter;		/* starting count for PMA */
+	u64 z_link_error_recovery_counter;	/* starting count for PMA */
+	u64 z_link_downed_counter;		/* starting count for PMA */
+	u64 z_port_rcv_errors;			/* starting count for PMA */
+	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
+	u64 z_port_xmit_discards;		/* starting count for PMA */
+	u64 z_port_xmit_data;			/* starting count for PMA */
+	u64 z_port_rcv_data;			/* starting count for PMA */
+	u64 z_port_xmit_packets;		/* starting count for PMA */
+	u64 z_port_rcv_packets;			/* starting count for PMA */
+	u32 z_pkey_violations;			/* starting count for PMA */
+	u32 z_local_link_integrity_errors;	/* starting count for PMA */
+	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
 	u32 n_rc_resends;
 	u32 n_rc_acks;
 	u32 n_rc_qacks;
@@ -462,6 +473,7 @@
 	u32 n_other_naks;
 	u32 n_timeouts;
 	u32 n_pkt_drops;
+	u32 n_vl15_dropped;
 	u32 n_wqe_errs;
 	u32 n_rdma_dup_busy;
 	u32 n_piowait;
@@ -580,10 +592,6 @@
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
-void ipath_do_rc_send(unsigned long data);
-
-void ipath_do_uc_send(unsigned long data);
-
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 
 int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -596,7 +604,7 @@
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
 
-int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
+int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr);
 
 void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
@@ -678,7 +686,19 @@
 
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
 
-void ipath_ruc_loopback(struct ipath_qp *sqp, struct ib_wc *wc);
+u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
+		   struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void ipath_do_ruc_send(unsigned long data);
+
+u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu);
+
+int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p);
+
+int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
+		      u32 pmtu, u32 *bth0p, u32 *bth2p);
 
 extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
 
@@ -688,6 +708,24 @@
 
 extern unsigned int ib_ipath_lkey_table_size;
 
+extern unsigned int ib_ipath_max_cqes;
+
+extern unsigned int ib_ipath_max_cqs;
+
+extern unsigned int ib_ipath_max_qp_wrs;
+
+extern unsigned int ib_ipath_max_sges;
+
+extern unsigned int ib_ipath_max_mcast_grps;
+
+extern unsigned int ib_ipath_max_mcast_qp_attached;
+
+extern unsigned int ib_ipath_max_srqs;
+
+extern unsigned int ib_ipath_max_srq_sges;
+
+extern unsigned int ib_ipath_max_srq_wrs;
+
 extern const u32 ib_ipath_rnr_table[];
 
 #endif				/* IPATH_VERBS_H */
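
The z_ prefix on the renamed PMA fields marks zero-point snapshots, which is
what the "starting count for PMA" comments mean: a performance-management
query reports the live chip counter minus its z_ baseline, and clearing a
counter only refreshes that snapshot. A hedged sketch of the convention,
using the types from ipath_verbs.h above (pma_symbol_errors() and its
hw_count argument are hypothetical, not part of this patch):

	/* value reported to the PMA: live count relative to the snapshot */
	static u64 pma_symbol_errors(struct ipath_ibdev *dev, u64 hw_count)
	{
		return hw_count - dev->z_symbol_error_counter;
	}

	/* "clearing" the counter just moves the baseline to the live count */
	static void pma_clear_symbol_errors(struct ipath_ibdev *dev, u64 hw_count)
	{
		dev->z_symbol_error_counter = hw_count;
	}
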
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index 10b31d2..ee0e1d9 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -92,6 +93,7 @@
 	INIT_LIST_HEAD(&mcast->qp_list);
 	init_waitqueue_head(&mcast->wait);
 	atomic_set(&mcast->refcount, 0);
+	mcast->n_attached = 0;
 
 bail:
 	return mcast;
@@ -157,7 +159,8 @@
  * the table but the QP was added.  Return ESRCH if the QP was already
  * attached and neither structure was added.
  */
-static int ipath_mcast_add(struct ipath_mcast *mcast,
+static int ipath_mcast_add(struct ipath_ibdev *dev,
+			   struct ipath_mcast *mcast,
 			   struct ipath_mcast_qp *mqp)
 {
 	struct rb_node **n = &mcast_tree.rb_node;
@@ -188,34 +191,47 @@
 		/* Search the QP list to see if this is already there. */
 		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
 			if (p->qp == mqp->qp) {
-				spin_unlock_irqrestore(&mcast_lock, flags);
 				ret = ESRCH;
 				goto bail;
 			}
 		}
+		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
+			ret = ENOMEM;
+			goto bail;
+		}
+
+		tmcast->n_attached++;
+
 		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
-		spin_unlock_irqrestore(&mcast_lock, flags);
 		ret = EEXIST;
 		goto bail;
 	}
 
+	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
+		ret = ENOMEM;
+		goto bail;
+	}
+
+	dev->n_mcast_grps_allocated++;
+
 	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
 
 	atomic_inc(&mcast->refcount);
 	rb_link_node(&mcast->rb_node, pn, n);
 	rb_insert_color(&mcast->rb_node, &mcast_tree);
 
-	spin_unlock_irqrestore(&mcast_lock, flags);
-
 	ret = 0;
 
 bail:
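+	/* all exit paths come through bail, so the lock is dropped exactly once */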
+	spin_unlock_irqrestore(&mcast_lock, flags);
+
 	return ret;
 }
 
 int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_mcast *mcast;
 	struct ipath_mcast_qp *mqp;
 	int ret;
@@ -235,7 +251,7 @@
 		ret = -ENOMEM;
 		goto bail;
 	}
-	switch (ipath_mcast_add(mcast, mqp)) {
+	switch (ipath_mcast_add(dev, mcast, mqp)) {
 	case ESRCH:
 		/* Neither was used: can't attach the same QP twice. */
 		ipath_mcast_qp_free(mqp);
@@ -245,6 +261,12 @@
 	case EEXIST:		/* The mcast wasn't used */
 		ipath_mcast_free(mcast);
 		break;
+	case ENOMEM:
+		/* Exceeded the maximum number of mcast groups. */
+		ipath_mcast_qp_free(mqp);
+		ipath_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
 	default:
 		break;
 	}
@@ -258,6 +280,7 @@
 int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_mcast *mcast = NULL;
 	struct ipath_mcast_qp *p, *tmp;
 	struct rb_node *n;
@@ -272,7 +295,7 @@
 	while (1) {
 		if (n == NULL) {
 			spin_unlock_irqrestore(&mcast_lock, flags);
-			ret = 0;
+			ret = -EINVAL;
 			goto bail;
 		}
 
@@ -296,6 +319,7 @@
 		 * link until we are sure there are no list walkers.
 		 */
 		list_del_rcu(&p->list);
+		mcast->n_attached--;
 
 		/* If this was the last attached QP, remove the GID too. */
 		if (list_empty(&mcast->qp_list)) {
@@ -319,6 +343,7 @@
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
 		ipath_mcast_free(mcast);
+		dev->n_mcast_grps_allocated--;
 	}
 
 	ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index adc5322..f8f9e2e 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
deleted file mode 100644
index ab7cbbb..0000000
--- a/drivers/infiniband/hw/ipath/ips_common.h
+++ /dev/null
@@ -1,263 +0,0 @@
-#ifndef IPS_COMMON_H
-#define IPS_COMMON_H
-/*
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_common.h"
-
-struct ipath_header {
-	/*
-	 * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
-	 * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
-	 * Port 3, TID 11, offset 14.
-	 */
-	__le32 ver_port_tid_offset;
-	__le16 chksum;
-	__le16 pkt_flags;
-};
-
-struct ips_message_header {
-	__be16 lrh[4];
-	__be32 bth[3];
-	/* fields below this point are in host byte order */
-	struct ipath_header iph;
-	__u8 sub_opcode;
-	__u8 flags;
-	__u16 src_rank;
-	/* 24 bits. The upper 8 bit is available for other use */
-	union {
-		struct {
-			unsigned ack_seq_num:24;
-			unsigned port:4;
-			unsigned unused:4;
-		};
-		__u32 ack_seq_num_org;
-	};
-	__u8 expected_tid_session_id;
-	__u8 tinylen;		/* to aid MPI */
-	union {
-	    __u16 tag;		/* to aid MPI */
-	    __u16 mqhdr;	/* for PSM MQ */
-	};
-	union {
-		__u32 mpi[4];	/* to aid MPI */
-		__u32 data[4];
-		__u64 mq[2];	/* for PSM MQ */
-		struct {
-			__u16 mtu;
-			__u8 major_ver;
-			__u8 minor_ver;
-			__u32 not_used;	//free
-			__u32 run_id;
-			__u32 client_ver;
-		};
-	};
-};
-
-struct ether_header {
-	__be16 lrh[4];
-	__be32 bth[3];
-	struct ipath_header iph;
-	__u8 sub_opcode;
-	__u8 cmd;
-	__be16 lid;
-	__u16 mac[3];
-	__u8 frag_num;
-	__u8 seq_num;
-	__le32 len;
-	/* MUST be of word size due to PIO write requirements */
-	__le32 csum;
-	__le16 csum_offset;
-	__le16 flags;
-	__u16 first_2_bytes;
-	__u8 unused[2];		/* currently unused */
-};
-
-/*
- * The PIO buffer used for sending infinipath messages must only be written
- * in 32-bit words, all the data must be written, and no writes can occur
- * after the last word is written (which transfers "ownership" of the buffer
- * to the chip and triggers the message to be sent).
- * Since the Linux sk_buff structure can be recursive, non-aligned, and
- * any number of bytes in each segment, we use the following structure
- * to keep information about the overall state of the copy operation.
- * This is used to save the information needed to store the checksum
- * in the right place before sending the last word to the hardware and
- * to buffer the last 0-3 bytes of non-word sized segments.
- */
-struct copy_data_s {
-	struct ether_header *hdr;
-	/* addr of PIO buf to write csum to */
-	__u32 __iomem *csum_pio;
-	__u32 __iomem *to;	/* addr of PIO buf to write data to */
-	__u32 device;		/* which device to allocate PIO bufs from */
-	__s32 error;		/* set if there is an error. */
-	__s32 extra;		/* amount of data saved in u.buf below */
-	__u32 len;		/* total length to send in bytes */
-	__u32 flen;		/* frament length in words */
-	__u32 csum;		/* partial IP checksum */
-	__u32 pos;		/* position for partial checksum */
-	__u32 offset;		/* offset to where data currently starts */
-	__s32 checksum_calc;	/* set to 1 when csum has been calculated */
-	struct sk_buff *skb;
-	union {
-		__u32 w;
-		__u8 buf[4];
-	} u;
-};
-
-/* IB - LRH header consts */
-#define IPS_LRH_GRH 0x0003	/* 1. word of IB LRH - next header: GRH */
-#define IPS_LRH_BTH 0x0002	/* 1. word of IB LRH - next header: BTH */
-
-#define IPS_OFFSET  0
-
-/*
- * defines the cut-off point between the header queue and eager/expected
- * TID queue
- */
-#define NUM_OF_EXTRA_WORDS_IN_HEADER_QUEUE \
-	((sizeof(struct ips_message_header) - \
-	  offsetof(struct ips_message_header, iph)) >> 2)
-
-/* OpCodes  */
-#define OPCODE_IPS 0xC0
-#define OPCODE_ITH4X 0xC1
-
-/* OpCode 30 is use by stand-alone test programs  */
-#define OPCODE_RAW_DATA 0xDE
-/* last OpCode (31) is reserved for test  */
-#define OPCODE_TEST 0xDF
-
-/* sub OpCodes - ips  */
-#define OPCODE_SEQ_DATA 0x01
-#define OPCODE_SEQ_CTRL 0x02
-
-#define OPCODE_SEQ_MQ_DATA 0x03
-#define OPCODE_SEQ_MQ_CTRL 0x04
-
-#define OPCODE_ACK 0x10
-#define OPCODE_NAK 0x11
-
-#define OPCODE_ERR_CHK 0x20
-#define OPCODE_ERR_CHK_PLS 0x21
-
-#define OPCODE_STARTUP 0x30
-#define OPCODE_STARTUP_ACK 0x31
-#define OPCODE_STARTUP_NAK 0x32
-
-#define OPCODE_STARTUP_EXT 0x34
-#define OPCODE_STARTUP_ACK_EXT 0x35
-#define OPCODE_STARTUP_NAK_EXT 0x36
-
-#define OPCODE_TIDS_RELEASE 0x40
-#define OPCODE_TIDS_RELEASE_CONFIRM 0x41
-
-#define OPCODE_CLOSE 0x50
-#define OPCODE_CLOSE_ACK 0x51
-/*
- * like OPCODE_CLOSE, but no complaint if other side has already closed.
- * Used when doing abort(), MPI_Abort(), etc.
- */
-#define OPCODE_ABORT 0x52
-
-/* sub OpCodes - ith4x  */
-#define OPCODE_ENCAP 0x81
-#define OPCODE_LID_ARP 0x82
-
-/* Receive Header Queue: receive type (from infinipath) */
-#define RCVHQ_RCV_TYPE_EXPECTED  0
-#define RCVHQ_RCV_TYPE_EAGER     1
-#define RCVHQ_RCV_TYPE_NON_KD    2
-#define RCVHQ_RCV_TYPE_ERROR     3
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define EAGER_TID_ID INFINIPATH_I_TID_MASK
-
-#define IPS_DEFAULT_P_KEY 0xFFFF
-
-#define IPS_PERMISSIVE_LID 0xFFFF
-#define IPS_MULTICAST_LID_BASE 0xC000
-
-#define IPS_AETH_CREDIT_SHIFT 24
-#define IPS_AETH_CREDIT_MASK 0x1F
-#define IPS_AETH_CREDIT_INVAL 0x1F
-
-#define IPS_PSN_MASK 0xFFFFFF
-#define IPS_MSN_MASK 0xFFFFFF
-#define IPS_QPN_MASK 0xFFFFFF
-#define IPS_MULTICAST_QPN 0xFFFFFF
-
-/* functions for extracting fields from rcvhdrq entries */
-static inline __u32 ips_get_hdr_err_flags(const __le32 * rbuf)
-{
-	return __le32_to_cpu(rbuf[1]);
-}
-
-static inline __u32 ips_get_index(const __le32 * rbuf)
-{
-	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
-	    & INFINIPATH_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 ips_get_rcv_type(const __le32 * rbuf)
-{
-	return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
-	    & INFINIPATH_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 ips_get_length_in_bytes(const __le32 * rbuf)
-{
-	return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
-		& INFINIPATH_RHF_LENGTH_MASK) << 2;
-}
-
-static inline void *ips_get_first_protocol_header(const __u32 * rbuf)
-{
-	return (void *)&rbuf[2];
-}
-
-static inline struct ips_message_header *ips_get_ips_header(const __u32 *
-							    rbuf)
-{
-	return (struct ips_message_header *)&rbuf[2];
-}
-
-static inline __u32 ips_get_ipath_ver(__le32 hdrword)
-{
-	return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
-	    & INFINIPATH_I_VERS_MASK;
-}
-
-#endif				/* IPS_COMMON_H */
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
index 40d693c..6186676 100644
--- a/drivers/infiniband/hw/ipath/verbs_debug.h
+++ b/drivers/infiniband/hw/ipath/verbs_debug.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index e7cf6be..9fefe56 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -395,7 +395,8 @@
 	/* check if the resource is already in use, skip if the
 	 * device is active because it itself may be in use */
 	if(!dev->active) {
-		if (request_irq(*irq, pnp_test_handler, SA_INTERRUPT, "pnp", NULL))
+		if (request_irq(*irq, pnp_test_handler,
+				SA_INTERRUPT|SA_PROBEIRQ, "pnp", NULL))
 			return 0;
 		free_irq(*irq, NULL);
 	}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d51afbe..f5b9f18 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -182,6 +182,22 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-rs5c372.
 
+config RTC_DRV_S3C
+	tristate "Samsung S3C series SoC RTC"
+	depends on RTC_CLASS && ARCH_S3C2410
+	help
+	  RTC (Realtime Clock) driver for the clock built into the
+	  Samsung S3C24XX series of SoCs. This can provide periodic
+	  interrupt rates from 1Hz to 64Hz for user programs, and
+	  wakeup from alarm.
+
+	  The driver currently supports the common features on all the
+	  S3C24XX range, such as the S3C2410, S3C2412, S3C2413, S3C2440
+	  and S3C2442.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-s3c.
+
 config RTC_DRV_M48T86
 	tristate "ST M48T86/Dallas DS12887"
 	depends on RTC_CLASS
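
The periodic interrupt rates the help text mentions are driven through the
standard /dev/rtc character-device interface. A minimal userspace sketch
(device node path assumed, error handling trimmed) that asks the driver for
a 64Hz tick and waits on ten of them:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rtc.h>

	int main(void)
	{
		unsigned long data;
		int i, fd = open("/dev/rtc", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/rtc");
			return 1;
		}
		if (ioctl(fd, RTC_IRQP_SET, 64) < 0)	/* must be a power of two */
			perror("RTC_IRQP_SET");
		if (ioctl(fd, RTC_PIE_ON, 0) < 0)	/* enable periodic IRQs */
			perror("RTC_PIE_ON");
		for (i = 0; i < 10; i++)
			if (read(fd, &data, sizeof(data)) < 0)	/* blocks per tick */
				break;
		ioctl(fd, RTC_PIE_OFF, 0);
		close(fd);
		return 0;
	}
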
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index da5e387..5422071 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_RTC_DRV_PCF8563)	+= rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_RS5C372)	+= rtc-rs5c372.o
+obj-$(CONFIG_RTC_DRV_S3C)	+= rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_RS5C348)	+= rtc-rs5c348.o
 obj-$(CONFIG_RTC_DRV_M48T86)	+= rtc-m48t86.o
 obj-$(CONFIG_RTC_DRV_DS1553)	+= rtc-ds1553.o
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
new file mode 100644
index 0000000..d6d1bff
--- /dev/null
+++ b/drivers/rtc/rtc-s3c.c
@@ -0,0 +1,607 @@
+/* drivers/rtc/rtc-s3c.c
+ *
+ * Copyright (c) 2004,2006 Simtec Electronics
+ *	Ben Dooks, <ben@simtec.co.uk>
+ *	http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2410/S3C2440/S3C24XX Internal RTC Driver
+*/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+
+#include <asm/hardware.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/rtc.h>
+
+#include <asm/mach/time.h>
+
+#include <asm/arch/regs-rtc.h>
+
+/* I have yet to find an S3C implementation with more than one
+ * of these rtc blocks in it */
+
+static struct resource *s3c_rtc_mem;
+
+static void __iomem *s3c_rtc_base;
+static int s3c_rtc_alarmno = NO_IRQ;
+static int s3c_rtc_tickno  = NO_IRQ;
+static int s3c_rtc_freq    = 1;
+
+static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
+static unsigned int tick_count;
+
+/* IRQ Handlers */
+
+static irqreturn_t s3c_rtc_alarmirq(int irq, void *id, struct pt_regs *r)
+{
+	struct rtc_device *rdev = id;
+
+	rtc_update_irq(&rdev->class_dev, 1, RTC_AF | RTC_IRQF);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t s3c_rtc_tickirq(int irq, void *id, struct pt_regs *r)
+{
+	struct rtc_device *rdev = id;
+
+	rtc_update_irq(&rdev->class_dev, tick_count++, RTC_PF | RTC_IRQF);
+	return IRQ_HANDLED;
+}
+
+/* Update control registers */
+static void s3c_rtc_setaie(int to)
+{
+	unsigned int tmp;
+
+	pr_debug("%s: aie=%d\n", __FUNCTION__, to);
+
+	tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
+
+	if (to)
+		tmp |= S3C2410_RTCALM_ALMEN;
+
+	writeb(tmp, S3C2410_RTCALM);
+}
+
+static void s3c_rtc_setpie(int to)
+{
+	unsigned int tmp;
+
+	pr_debug("%s: pie=%d\n", __FUNCTION__, to);
+
+	spin_lock_irq(&s3c_rtc_pie_lock);
+	tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
+
+	if (to)
+		tmp |= S3C2410_TICNT_ENABLE;
+
+	writeb(tmp, S3C2410_TICNT);
+	spin_unlock_irq(&s3c_rtc_pie_lock);
+}
+
+static void s3c_rtc_setfreq(int freq)
+{
+	unsigned int tmp;
+
+	spin_lock_irq(&s3c_rtc_pie_lock);
+	tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
+
+	s3c_rtc_freq = freq;
+
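+	/* the periodic tick is derived from a 128Hz source: a value of n
+	 * in TICNT raises an interrupt every (n+1)/128 seconds, so program
+	 * (128/freq)-1 to get a freq Hz tick */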
+	tmp |= (128 / freq) - 1;
+
+	writeb(tmp, S3C2410_TICNT);
+	spin_unlock_irq(&s3c_rtc_pie_lock);
+}
+
+/* Time read/write */
+
+static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+	unsigned int have_retried = 0;
+
+ retry_get_time:
+	rtc_tm->tm_min  = readb(S3C2410_RTCMIN);
+	rtc_tm->tm_hour = readb(S3C2410_RTCHOUR);
+	rtc_tm->tm_mday = readb(S3C2410_RTCDATE);
+	rtc_tm->tm_mon  = readb(S3C2410_RTCMON);
+	rtc_tm->tm_year = readb(S3C2410_RTCYEAR);
+	rtc_tm->tm_sec  = readb(S3C2410_RTCSEC);
+
+	/* the only way to work out whether the system was mid-update
+	 * when we read it is to check the second counter, and if it
+	 * is zero, then we retry the entire read
+	 */
+
+	if (rtc_tm->tm_sec == 0 && !have_retried) {
+		have_retried = 1;
+		goto retry_get_time;
+	}
+
+	pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+		 rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+		 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+	BCD_TO_BIN(rtc_tm->tm_sec);
+	BCD_TO_BIN(rtc_tm->tm_min);
+	BCD_TO_BIN(rtc_tm->tm_hour);
+	BCD_TO_BIN(rtc_tm->tm_mday);
+	BCD_TO_BIN(rtc_tm->tm_mon);
+	BCD_TO_BIN(rtc_tm->tm_year);
+
+	rtc_tm->tm_year += 100;
+	rtc_tm->tm_mon -= 1;
+
+	return 0;
+}
+
+static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+	/* the rtc gets round the y2k problem by just not supporting it */
+
+	if (tm->tm_year < 100)
+		return -EINVAL;
+
+	writeb(BIN2BCD(tm->tm_sec),  S3C2410_RTCSEC);
+	writeb(BIN2BCD(tm->tm_min),  S3C2410_RTCMIN);
+	writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR);
+	writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE);
+	writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON);
+	writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR);
+
+	return 0;
+}
+
+static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_time *alm_tm = &alrm->time;
+	unsigned int alm_en;
+
+	alm_tm->tm_sec  = readb(S3C2410_ALMSEC);
+	alm_tm->tm_min  = readb(S3C2410_ALMMIN);
+	alm_tm->tm_hour = readb(S3C2410_ALMHOUR);
+	alm_tm->tm_mon  = readb(S3C2410_ALMMON);
+	alm_tm->tm_mday = readb(S3C2410_ALMDATE);
+	alm_tm->tm_year = readb(S3C2410_ALMYEAR);
+
+	alm_en = readb(S3C2410_RTCALM);
+
+	pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+		 alm_en,
+		 alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+		 alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+
+	/* decode the alarm enable field */
+
+	if (alm_en & S3C2410_RTCALM_SECEN)
+		BCD_TO_BIN(alm_tm->tm_sec);
+	else
+		alm_tm->tm_sec = 0xff;
+
+	if (alm_en & S3C2410_RTCALM_MINEN)
+		BCD_TO_BIN(alm_tm->tm_min);
+	else
+		alm_tm->tm_min = 0xff;
+
+	if (alm_en & S3C2410_RTCALM_HOUREN)
+		BCD_TO_BIN(alm_tm->tm_hour);
+	else
+		alm_tm->tm_hour = 0xff;
+
+	if (alm_en & S3C2410_RTCALM_DAYEN)
+		BCD_TO_BIN(alm_tm->tm_mday);
+	else
+		alm_tm->tm_mday = 0xff;
+
+	if (alm_en & S3C2410_RTCALM_MONEN) {
+		BCD_TO_BIN(alm_tm->tm_mon);
+		alm_tm->tm_mon -= 1;
+	} else {
+		alm_tm->tm_mon = 0xff;
+	}
+
+	if (alm_en & S3C2410_RTCALM_YEAREN)
+		BCD_TO_BIN(alm_tm->tm_year);
+	else
+		alm_tm->tm_year = 0xffff;
+
+	return 0;
+}
+
+static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct rtc_time *tm = &alrm->time;
+	unsigned int alrm_en;
+
+	pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+		 alrm->enabled,
+		 tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
+		 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+
+
+	alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
+	writeb(0x00, S3C2410_RTCALM);
+
+	if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
+		alrm_en |= S3C2410_RTCALM_SECEN;
+		writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC);
+	}
+
+	if (tm->tm_min < 60 && tm->tm_min >= 0) {
+		alrm_en |= S3C2410_RTCALM_MINEN;
+		writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN);
+	}
+
+	if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
+		alrm_en |= S3C2410_RTCALM_HOUREN;
+		writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR);
+	}
+
+	pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
+
+	writeb(alrm_en, S3C2410_RTCALM);
+
+	if (0) {
+		alrm_en = readb(S3C2410_RTCALM);
+		alrm_en &= ~S3C2410_RTCALM_ALMEN;
+		writeb(alrm_en, S3C2410_RTCALM);
+		disable_irq_wake(s3c_rtc_alarmno);
+	}
+
+	if (alrm->enabled)
+		enable_irq_wake(s3c_rtc_alarmno);
+	else
+		disable_irq_wake(s3c_rtc_alarmno);
+
+	return 0;
+}
+
+static int s3c_rtc_ioctl(struct device *dev,
+			 unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	case RTC_AIE_OFF:
+	case RTC_AIE_ON:
+		s3c_rtc_setaie((cmd == RTC_AIE_ON) ? 1 : 0);
+		ret = 0;
+		break;
+
+	case RTC_PIE_OFF:
+	case RTC_PIE_ON:
+		tick_count = 0;
+		s3c_rtc_setpie((cmd == RTC_PIE_ON) ? 1 : 0);
+		ret = 0;
+		break;
+
+	case RTC_IRQP_READ:
+		ret = put_user(s3c_rtc_freq, (unsigned long __user *)arg);
+		break;
+
+	case RTC_IRQP_SET:
+		/* check for power of 2 */
+
+		if ((arg & (arg-1)) != 0 || arg < 1) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		pr_debug("s3c2410_rtc: setting frequency %ld\n", arg);
+
+		s3c_rtc_setfreq(arg);
+		ret = 0;
+		break;
+
+	case RTC_UIE_ON:
+	case RTC_UIE_OFF:
+		ret = -EINVAL;
+	}
+
+ exit:
+	return ret;
+}
+
+static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+	unsigned int rtcalm = readb(S3C2410_RTCALM);
+	unsigned int ticnt = readb(S3C2410_TICNT);
+
+	seq_printf(seq, "alarm_IRQ\t: %s\n",
+		   (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no");
+
+	seq_printf(seq, "periodic_IRQ\t: %s\n",
+		   (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no");
+
+	seq_printf(seq, "periodic_freq\t: %d\n", s3c_rtc_freq);
+
+	return 0;
+}
+
+static int s3c_rtc_open(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
+			  SA_INTERRUPT,  "s3c2410-rtc alarm", rtc_dev);
+
+	if (ret) {
+		dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
+		return ret;
+	}
+
+	ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
+			  SA_INTERRUPT,  "s3c2410-rtc tick", rtc_dev);
+
+	if (ret) {
+		dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
+		goto tick_err;
+	}
+
+	return ret;
+
+ tick_err:
+	free_irq(s3c_rtc_alarmno, rtc_dev);
+	return ret;
+}
+
+static void s3c_rtc_release(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+
+	/* do not clear AIE here, it may be needed for wake */
+
+	s3c_rtc_setpie(0);
+	free_irq(s3c_rtc_alarmno, rtc_dev);
+	free_irq(s3c_rtc_tickno, rtc_dev);
+}
+
+static struct rtc_class_ops s3c_rtcops = {
+	.open		= s3c_rtc_open,
+	.release	= s3c_rtc_release,
+	.ioctl		= s3c_rtc_ioctl,
+	.read_time	= s3c_rtc_gettime,
+	.set_time	= s3c_rtc_settime,
+	.read_alarm	= s3c_rtc_getalarm,
+	.set_alarm	= s3c_rtc_setalarm,
+	.proc		= s3c_rtc_proc,
+};
+
+static void s3c_rtc_enable(struct platform_device *pdev, int en)
+{
+	unsigned int tmp;
+
+	if (s3c_rtc_base == NULL)
+		return;
+
+	if (!en) {
+		tmp = readb(S3C2410_RTCCON);
+		writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON);
+
+		tmp = readb(S3C2410_TICNT);
+		writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT);
+	} else {
+		/* re-enable the device, and check it is ok */
+
+		if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
+			dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
+
+			tmp = readb(S3C2410_RTCCON);
+			writeb(tmp | S3C2410_RTCCON_RTCEN, S3C2410_RTCCON);
+		}
+
+		if (readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL) {
+			dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
+
+			tmp = readb(S3C2410_RTCCON);
+			writeb(tmp & ~S3C2410_RTCCON_CNTSEL, S3C2410_RTCCON);
+		}
+
+		if (readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST) {
+			dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
+
+			tmp = readb(S3C2410_RTCCON);
+			writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON);
+		}
+	}
+}
+
+static int s3c_rtc_remove(struct platform_device *dev)
+{
+	struct rtc_device *rtc = platform_get_drvdata(dev);
+
+	platform_set_drvdata(dev, NULL);
+	rtc_device_unregister(rtc);
+
+	s3c_rtc_setpie(0);
+	s3c_rtc_setaie(0);
+
+	iounmap(s3c_rtc_base);
+	release_resource(s3c_rtc_mem);
+	kfree(s3c_rtc_mem);
+
+	return 0;
+}
+
+static int s3c_rtc_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+	struct resource *res;
+	int ret;
+
+	pr_debug("%s: probe=%p\n", __FUNCTION__, pdev);
+
+	/* find the IRQs */
+
+	s3c_rtc_tickno = platform_get_irq(pdev, 1);
+	if (s3c_rtc_tickno < 0) {
+		dev_err(&pdev->dev, "no irq for rtc tick\n");
+		return -ENOENT;
+	}
+
+	s3c_rtc_alarmno = platform_get_irq(pdev, 0);
+	if (s3c_rtc_alarmno < 0) {
+		dev_err(&pdev->dev, "no irq for alarm\n");
+		return -ENOENT;
+	}
+
+	pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n",
+		 s3c_rtc_tickno, s3c_rtc_alarmno);
+
+	/* get the memory region */
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory region resource\n");
+		return -ENOENT;
+	}
+
+	s3c_rtc_mem = request_mem_region(res->start,
+					 res->end - res->start + 1,
+					 pdev->name);
+
+	if (s3c_rtc_mem == NULL) {
+		dev_err(&pdev->dev, "failed to reserve memory region\n");
+		ret = -ENOENT;
+		goto err_nores;
+	}
+
+	s3c_rtc_base = ioremap(res->start, res->end - res->start + 1);
+	if (s3c_rtc_base == NULL) {
+		dev_err(&pdev->dev, "failed ioremap()\n");
+		ret = -EINVAL;
+		goto err_nomap;
+	}
+
+	/* check to see if everything is setup correctly */
+
+	s3c_rtc_enable(pdev, 1);
+
+	pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON));
+
+	s3c_rtc_setfreq(s3c_rtc_freq);
+
+	/* register RTC and exit */
+
+	rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops,
+				  THIS_MODULE);
+
+	if (IS_ERR(rtc)) {
+		dev_err(&pdev->dev, "cannot attach rtc\n");
+		ret = PTR_ERR(rtc);
+		goto err_nortc;
+	}
+
+	rtc->max_user_freq = 128;
+
+	platform_set_drvdata(pdev, rtc);
+	return 0;
+
+ err_nortc:
+	s3c_rtc_enable(pdev, 0);
+	iounmap(s3c_rtc_base);
+
+ err_nomap:
+	release_resource(s3c_rtc_mem);
+
+ err_nores:
+	return ret;
+}
+
+#ifdef CONFIG_PM
+
+/* RTC Power management control */
+
+static struct timespec s3c_rtc_delta;
+
+static int ticnt_save;
+
+static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct rtc_time tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+
+	/* save TICNT for anyone using periodic interrupts */
+
+	ticnt_save = readb(S3C2410_TICNT);
+
+	/* calculate time delta for suspend */
+
+	s3c_rtc_gettime(&pdev->dev, &tm);
+	rtc_tm_to_time(&tm, &time.tv_sec);
+	save_time_delta(&s3c_rtc_delta, &time);
+	s3c_rtc_enable(pdev, 0);
+
+	return 0;
+}
+
+static int s3c_rtc_resume(struct platform_device *pdev)
+{
+	struct rtc_time tm;
+	struct timespec time;
+
+	time.tv_nsec = 0;
+
+	s3c_rtc_enable(pdev, 1);
+	s3c_rtc_gettime(&pdev->dev, &tm);
+	rtc_tm_to_time(&tm, &time.tv_sec);
+	restore_time_delta(&s3c_rtc_delta, &time);
+
+	writeb(ticnt_save, S3C2410_TICNT);
+	return 0;
+}
+#else
+#define s3c_rtc_suspend NULL
+#define s3c_rtc_resume  NULL
+#endif
+
+static struct platform_driver s3c2410_rtcdrv = {
+	.probe		= s3c_rtc_probe,
+	.remove		= s3c_rtc_remove,
+	.suspend	= s3c_rtc_suspend,
+	.resume		= s3c_rtc_resume,
+	.driver		= {
+		.name	= "s3c2410-rtc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";
+
+static int __init s3c_rtc_init(void)
+{
+	printk(banner);
+	return platform_driver_register(&s3c2410_rtcdrv);
+}
+
+static void __exit s3c_rtc_exit(void)
+{
+	platform_driver_unregister(&s3c2410_rtcdrv);
+}
+
+module_init(s3c_rtc_init);
+module_exit(s3c_rtc_exit);
+
+MODULE_DESCRIPTION("Samsung S3C RTC Driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
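
rtc-s3c.c keeps every hardware time field in BCD and converts with
BCD_TO_BIN/BIN2BCD from <linux/bcd.h>. For reference, a standalone sketch
of the same conversions (one decimal digit per nibble, so 0x59 encodes 59):

	#include <assert.h>

	static unsigned bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;	/* low digit + 10 * high digit */
	}

	static unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) | (val % 10);
	}

	int main(void)
	{
		assert(bcd2bin(0x59) == 59);
		assert(bin2bcd(59) == 0x59);
		return 0;
	}
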
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 477eaa2..12dfdcf 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2932,6 +2932,11 @@
 			}
 			if (error)
 				goto out;
+			/*
+			 * file size is changed, ctime and mtime are
+			 * to be updated
+			 */
+			attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
 		}
 	}
 
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 95b878e..b01804b 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -217,48 +217,6 @@
 	return;
 }
 
-static struct page *ufs_get_locked_page(struct address_space *mapping,
-				  unsigned long index)
-{
-	struct page *page;
-
-try_again:
-	page = find_lock_page(mapping, index);
-	if (!page) {
-		page = read_cache_page(mapping, index,
-				       (filler_t*)mapping->a_ops->readpage,
-				       NULL);
-		if (IS_ERR(page)) {
-			printk(KERN_ERR "ufs_change_blocknr: "
-			       "read_cache_page error: ino %lu, index: %lu\n",
-			       mapping->host->i_ino, index);
-			goto out;
-		}
-
-		lock_page(page);
-
-		if (!PageUptodate(page) || PageError(page)) {
-			unlock_page(page);
-			page_cache_release(page);
-
-			printk(KERN_ERR "ufs_change_blocknr: "
-			       "can not read page: ino %lu, index: %lu\n",
-			       mapping->host->i_ino, index);
-
-			page = ERR_PTR(-EIO);
-			goto out;
-		}
-	}
-
-	if (unlikely(!page->mapping || !page_has_buffers(page))) {
-		unlock_page(page);
-		page_cache_release(page);
-		goto try_again;/*we really need these buffers*/
-	}
-out:
-	return page;
-}
-
 /*
  * Modify inode page cache in such way:
  * have - blocks with b_blocknr equal to oldb...oldb+count-1
@@ -311,10 +269,8 @@
 
 		set_page_dirty(page);
 
-		if (likely(cur_index != index)) {
-			unlock_page(page);
-			page_cache_release(page);
-		}
+		if (likely(cur_index != index))
+			ufs_put_locked_page(page);
  	}
 	UFSD("EXIT\n");
 }
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index 0e50015..a9c6e5f 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -60,7 +60,3 @@
 	.fsync		= ufs_sync_file,
 	.sendfile	= generic_file_sendfile,
 };
-
-struct inode_operations ufs_file_inode_operations = {
-	.truncate	= ufs_truncate,
-};
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 488b5ff..e7c8615 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -843,14 +843,17 @@
 
 void ufs_delete_inode (struct inode * inode)
 {
+	loff_t old_i_size;
+
 	truncate_inode_pages(&inode->i_data, 0);
 	/*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
 	lock_kernel();
 	mark_inode_dirty(inode);
 	ufs_update_inode(inode, IS_SYNC(inode));
+	old_i_size = inode->i_size;
 	inode->i_size = 0;
-	if (inode->i_blocks)
-		ufs_truncate (inode);
+	if (inode->i_blocks && ufs_truncate(inode, old_i_size))
+		ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
 	ufs_free_inode (inode);
 	unlock_kernel();
 }
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 3c3b301..c9b5587 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -369,24 +369,97 @@
 	UFSD("EXIT\n");
 	return retry;
 }
-		
-void ufs_truncate (struct inode * inode)
+
+static int ufs_alloc_lastblock(struct inode *inode)
+{
+	int err = 0;
+	struct address_space *mapping = inode->i_mapping;
+	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
+	struct ufs_inode_info *ufsi = UFS_I(inode);
+	unsigned lastfrag, i, end;
+	struct page *lastpage;
+	struct buffer_head *bh;
+
+	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
+
+	if (!lastfrag) {
+		ufsi->i_lastfrag = 0;
+		goto out;
+	}
+	lastfrag--;
+
+	lastpage = ufs_get_locked_page(mapping, lastfrag >>
+				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+	if (IS_ERR(lastpage)) {
+		err = -EIO;
+		goto out;
+	}
+
+	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+	bh = page_buffers(lastpage);
+	for (i = 0; i < end; ++i)
+		bh = bh->b_this_page;
+
+	if (!buffer_mapped(bh)) {
+		err = ufs_getfrag_block(inode, lastfrag, bh, 1);
+
+		if (unlikely(err))
+			goto out_unlock;
+
+		if (buffer_new(bh)) {
+			clear_buffer_new(bh);
+			unmap_underlying_metadata(bh->b_bdev,
+						  bh->b_blocknr);
+			/*
+			 * we do not zero out the fragment: if it is
+			 * mapped to a hole, it already contains zeroes
+			 */
+			set_buffer_uptodate(bh);
+			mark_buffer_dirty(bh);
+			set_page_dirty(lastpage);
+		}
+	}
+out_unlock:
+	ufs_put_locked_page(lastpage);
+out:
+	return err;
+}
+
+int ufs_truncate(struct inode *inode, loff_t old_i_size)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
-	struct super_block * sb;
-	struct ufs_sb_private_info * uspi;
-	int retry;
+	struct super_block *sb = inode->i_sb;
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int retry, err = 0;
 	
 	UFSD("ENTER\n");
-	sb = inode->i_sb;
-	uspi = UFS_SB(sb)->s_uspi;
 
-	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
-		return;
+	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	      S_ISLNK(inode->i_mode)))
+		return -EINVAL;
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return;
+		return -EPERM;
 
-	block_truncate_page(inode->i_mapping,	inode->i_size, ufs_getfrag_block);
+	if (inode->i_size > old_i_size) {
+		/*
+		 * if we are expanding the file, first of all take
+		 * care of allocating the block for the last byte
+		 */
+		err = ufs_alloc_lastblock(inode);
+
+		if (err) {
+			i_size_write(inode, old_i_size);
+			goto out;
+		}
+		/*
+		 * we are done: the file was expanded, so there are no
+		 * blocks to free and no page to zero out
+		 */
+		lock_kernel();
+		goto almost_end;
+	}
+
+	block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
 
 	lock_kernel();
 	while (1) {
@@ -404,9 +477,58 @@
 		yield();
 	}
 
+	if (inode->i_size < old_i_size) {
+		/*
+		 * now we should have enough space
+		 * to allocate block for last byte
+		 */
+		err = ufs_alloc_lastblock(inode);
+		if (err)
+			/*
+			 * it seems we are out of space, but the file
+			 * has already been truncated
+			 */
+			inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize;
+	}
+almost_end:
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-	ufsi->i_lastfrag = DIRECT_FRAGMENT;
 	unlock_kernel();
 	mark_inode_dirty(inode);
-	UFSD("EXIT\n");
+out:
+	UFSD("EXIT: err %d\n", err);
+	return err;
 }
+
+
+/*
+ * We do not define our own `inode->i_op->truncate' but call ufs_truncate()
+ * from here instead, because:
+ * - `truncate' has no way to know the old size
+ * - `truncate' has no way to report an error to the user
+ */
+static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	unsigned int ia_valid = attr->ia_valid;
+	int error;
+
+	error = inode_change_ok(inode, attr);
+	if (error)
+		return error;
+
+	if (ia_valid & ATTR_SIZE &&
+	    attr->ia_size != i_size_read(inode)) {
+		loff_t old_i_size = inode->i_size;
+		error = vmtruncate(inode, attr->ia_size);
+		if (error)
+			return error;
+		error = ufs_truncate(inode, old_i_size);
+		if (error)
+			return error;
+	}
+	return inode_setattr(inode, attr);
+}
+
+struct inode_operations ufs_file_inode_operations = {
+	.setattr = ufs_setattr,
+};
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index a2f13f4..337cf2c 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -233,3 +233,57 @@
 	else
 		ufsi->i_u1.i_data[0] = fs32;
 }
+
+/**
+ * ufs_get_locked_page() - locate, pin and lock a pagecache page; if it
+ * does not exist, read it in from disk.
+ * @mapping: the address_space to search
+ * @index: the page index
+ *
+ * Locates the desired pagecache page (reading it from disk if it does
+ * not already exist), locks it, increments its reference count and
+ * returns its address.
+ */
+
+struct page *ufs_get_locked_page(struct address_space *mapping,
+				 pgoff_t index)
+{
+	struct page *page;
+
+try_again:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		page = read_cache_page(mapping, index,
+				       (filler_t*)mapping->a_ops->readpage,
+				       NULL);
+		if (IS_ERR(page)) {
+			printk(KERN_ERR "ufs_get_locked_page: "
+			       "read_cache_page error: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+			goto out;
+		}
+
+		lock_page(page);
+
+		if (!PageUptodate(page) || PageError(page)) {
+			unlock_page(page);
+			page_cache_release(page);
+
+			printk(KERN_ERR "ufs_get_locked_page: "
+			       "can not read page: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+
+			page = ERR_PTR(-EIO);
+			goto out;
+		}
+	}
+
+	if (unlikely(!page->mapping || !page_has_buffers(page))) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto try_again;	/* we really need these buffers */
+	}
+out:
+	return page;
+}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 406981f..28fce6c 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -251,6 +251,14 @@
 #define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
 extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);
 
+/* These functions work with pagecache pages */
+extern struct page *ufs_get_locked_page(struct address_space *mapping,
+					pgoff_t index);
+static inline void ufs_put_locked_page(struct page *page)
+{
+	unlock_page(page);
+	page_cache_release(page);
+}
 
 
 /*
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
new file mode 100644
index 0000000..cb05bf6
--- /dev/null
+++ b/include/asm-generic/audit_change_attr.h
@@ -0,0 +1,18 @@
+__NR_chmod,
+__NR_fchmod,
+__NR_chown,
+__NR_fchown,
+__NR_lchown,
+__NR_setxattr,
+__NR_lsetxattr,
+__NR_fsetxattr,
+__NR_removexattr,
+__NR_lremovexattr,
+__NR_fremovexattr,
+__NR_fchownat,
+__NR_fchmodat,
+#ifdef __NR_chown32
+__NR_chown32,
+__NR_fchown32,
+__NR_lchown32,
+#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
new file mode 100644
index 0000000..161a7a5
--- /dev/null
+++ b/include/asm-generic/audit_dir_write.h
@@ -0,0 +1,14 @@
+__NR_rename,
+__NR_mkdir,
+__NR_rmdir,
+__NR_creat,
+__NR_link,
+__NR_unlink,
+__NR_symlink,
+__NR_mknod,
+__NR_mkdirat,
+__NR_mknodat,
+__NR_unlinkat,
+__NR_renameat,
+__NR_linkat,
+__NR_symlinkat,
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 0b49f9e..962cad7 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,5 +14,6 @@
 extern char __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
+extern char __start_rodata[], __end_rodata[];
 
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index c61bd1a..96adbab 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -19,11 +19,19 @@
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
 struct module;
+#ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,
 					void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
+#else
+static inline void alternatives_smp_module_add(struct module *mod, char *name,
+					void *locks, void *locks_end,
+					void *text, void *text_end) {}
+static inline void alternatives_smp_module_del(struct module *mod) {}
+static inline void alternatives_smp_switch(int smp) {}
+#endif
 
 #endif
 
diff --git a/include/asm-um/kmap_types.h b/include/asm-um/kmap_types.h
index 0b22ad7..6c03acd 100644
--- a/include/asm-um/kmap_types.h
+++ b/include/asm-um/kmap_types.h
@@ -6,6 +6,24 @@
 #ifndef __UM_KMAP_TYPES_H
 #define __UM_KMAP_TYPES_H
 
-#include "asm/arch/kmap_types.h"
+/* No more #include "asm/arch/kmap_types.h" ! */
+
+enum km_type {
+	KM_BOUNCE_READ,
+	KM_SKB_SUNRPC_DATA,
+	KM_SKB_DATA_SOFTIRQ,
+	KM_USER0,
+	KM_USER1,
+	KM_UML_USERCOPY,	/* UML specific, for copy_*_user - used in do_op_one_page */
+	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
+	KM_IRQ0,
+	KM_IRQ1,
+	KM_SOFTIRQ0,
+	KM_SOFTIRQ1,
+	KM_TYPE_NR
+};
 
 #endif
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index 387c8f6..aa67bfd 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -17,11 +17,20 @@
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
 struct module;
+
+#ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,
 					void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
+#else
+static inline void alternatives_smp_module_add(struct module *mod, char *name,
+					void *locks, void *locks_end,
+					void *text, void *text_end) {}
+static inline void alternatives_smp_module_del(struct module *mod) {}
+static inline void alternatives_smp_switch(int smp) {}
+#endif
 
 #endif
 
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e051ff9..b27d7de 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -122,10 +122,17 @@
 /* Rule structure sizes -- if these change, different AUDIT_ADD and
  * AUDIT_LIST commands must be implemented. */
 #define AUDIT_MAX_FIELDS   64
+#define AUDIT_MAX_KEY_LEN  32
 #define AUDIT_BITMASK_SIZE 64
 #define AUDIT_WORD(nr) ((__u32)((nr)/32))
 #define AUDIT_BIT(nr)  (1 << ((nr) - AUDIT_WORD(nr)*32))
 
+#define AUDIT_SYSCALL_CLASSES 16
+#define AUDIT_CLASS_DIR_WRITE 0
+#define AUDIT_CLASS_DIR_WRITE_32 1
+#define AUDIT_CLASS_CHATTR 2
+#define AUDIT_CLASS_CHATTR_32 3
+
 /* This bitmask is used to validate user input.  It represents all bits that
  * are currently used in an audit field constant understood by the kernel.
  * If you are adding a new #define AUDIT_<whatever>, please ensure that
@@ -150,12 +157,17 @@
 #define AUDIT_PERS	10
 #define AUDIT_ARCH	11
 #define AUDIT_MSGTYPE	12
-#define AUDIT_SE_USER	13	/* security label user */
-#define AUDIT_SE_ROLE	14	/* security label role */
-#define AUDIT_SE_TYPE	15	/* security label type */
-#define AUDIT_SE_SEN	16	/* security label sensitivity label */
-#define AUDIT_SE_CLR	17	/* security label clearance label */
+#define AUDIT_SUBJ_USER	13	/* security label user */
+#define AUDIT_SUBJ_ROLE	14	/* security label role */
+#define AUDIT_SUBJ_TYPE	15	/* security label type */
+#define AUDIT_SUBJ_SEN	16	/* security label sensitivity label */
+#define AUDIT_SUBJ_CLR	17	/* security label clearance label */
 #define AUDIT_PPID	18
+#define AUDIT_OBJ_USER	19
+#define AUDIT_OBJ_ROLE	20
+#define AUDIT_OBJ_TYPE	21
+#define AUDIT_OBJ_LEV_LOW	22
+#define AUDIT_OBJ_LEV_HIGH	23
 
 				/* These are ONLY useful when checking
 				 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -171,6 +183,8 @@
 #define AUDIT_ARG2      (AUDIT_ARG0+2)
 #define AUDIT_ARG3      (AUDIT_ARG0+3)
 
+#define AUDIT_FILTERKEY	210
+
 #define AUDIT_NEGATE			0x80000000
 
 /* These are the supported operators.
@@ -299,6 +313,7 @@
 #define AUDITSC_SUCCESS 1
 #define AUDITSC_FAILURE 2
 #define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS )
+extern int __init audit_register_class(int class, unsigned *list);
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
 				/* Public API */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a3caf68..44a11f1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -87,9 +87,9 @@
 #define lock_cpu_hotplug()	do { } while (0)
 #define unlock_cpu_hotplug()	do { } while (0)
 #define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri)
-#define register_hotcpu_notifier(nb)
-#define unregister_hotcpu_notifier(nb)
+#define hotcpu_notifier(fn, pri)	do { } while (0)
+#define register_hotcpu_notifier(nb)	do { } while (0)
+#define unregister_hotcpu_notifier(nb)	do { } while (0)
 
 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
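
The do { } while (0) bodies matter because these macros are used as ordinary
statements: with an empty expansion, a call such as hotcpu_notifier(fn, 0);
as the sole body of an if leaves a bare null statement, which draws
empty-body warnings and reads as a bug. A small sketch of the difference
(the stub names are illustrative only):

	#define stub_empty(nb)
	#define stub_safe(nb)	do { } while (0)

	void configure(int hotplug, void *nb)
	{
		if (hotplug)
			stub_safe(nb);	/* expands to one well-formed statement */
		else
			(void)nb;	/* stub_empty() would leave `if (hotplug) ;' above */
	}
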
diff --git a/include/linux/err.h b/include/linux/err.h
index ff71d2a..cd3b367 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -13,7 +13,9 @@
  * This should be a per-architecture thing, to allow different
  * error and pointer decisions.
  */
-#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)
+#define MAX_ERRNO	4095
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
 static inline void *ERR_PTR(long error)
 {
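
Raising the cutoff from 1000 to MAX_ERRNO lets the error-pointer trick cover
the whole errno range: the top 4095 values of the address space are never
valid kernel pointers, so a negative errno can travel inside a pointer. A
userspace sketch of the encoding (unlikely() dropped for brevity):

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

	static void *ERR_PTR(long error)     { return (void *)error; }
	static long PTR_ERR(const void *ptr) { return (long)ptr; }
	static int IS_ERR(const void *ptr)   { return IS_ERR_VALUE((unsigned long)ptr); }

	int main(void)
	{
		void *p = ERR_PTR(-12);		/* -ENOMEM */

		if (IS_ERR(p))
			printf("errno %ld rides in pointer %p\n", -PTR_ERR(p), p);
		return 0;
	}
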
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index e39b7cc..fc62887 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -993,7 +993,7 @@
 extern struct inode_operations ufs_fast_symlink_inode_operations;
 
 /* truncate.c */
-extern void ufs_truncate (struct inode *);
+extern int ufs_truncate (struct inode *, loff_t);
 
 static inline struct ufs_sb_info *UFS_SB(struct super_block *sb)
 {
diff --git a/kernel/audit.h b/kernel/audit.h
index 8323e41..6aa33b8 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -81,6 +81,7 @@
 	u32			mask[AUDIT_BITMASK_SIZE];
 	u32			buflen; /* for data alloc on list rules */
 	u32			field_count;
+	char			*filterkey; /* ties events to rules */
 	struct audit_field	*fields;
 	struct audit_field	*inode_f; /* quick access to an inode field */
 	struct audit_watch	*watch;	/* associated watch */
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 4c99d2c..5b4e162 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -141,6 +141,7 @@
 			selinux_audit_rule_free(f->se_rule);
 		}
 	kfree(e->rule.fields);
+	kfree(e->rule.filterkey);
 	kfree(e);
 }
 
@@ -278,6 +279,29 @@
 	return 0;
 }
 
+static __u32 *classes[AUDIT_SYSCALL_CLASSES];
+
+int __init audit_register_class(int class, unsigned *list)
+{
+	__u32 *p = kzalloc(AUDIT_BITMASK_SIZE * sizeof(__u32), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	while (*list != ~0U) {
+		unsigned n = *list++;
+		if (n >= AUDIT_BITMASK_SIZE * 32 - AUDIT_SYSCALL_CLASSES) {
+			kfree(p);
+			return -EINVAL;
+		}
+		p[AUDIT_WORD(n)] |= AUDIT_BIT(n);
+	}
+	if (class >= AUDIT_SYSCALL_CLASSES || classes[class]) {
+		kfree(p);
+		return -EINVAL;
+	}
+	classes[class] = p;
+	return 0;
+}
+
 /* Common user-space to kernel rule translation. */
 static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
 {
@@ -321,6 +345,22 @@
 	for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
 		entry->rule.mask[i] = rule->mask[i];
 
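+	/* the top AUDIT_SYSCALL_CLASSES bits of the mask are shorthands for
+	 * whole syscall classes; expand each one that is set into the
+	 * per-syscall bits registered by audit_register_class() */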
+	for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) {
+		int bit = AUDIT_BITMASK_SIZE * 32 - i - 1;
+		__u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
+		__u32 *class;
+
+		if (!(*p & AUDIT_BIT(bit)))
+			continue;
+		*p &= ~AUDIT_BIT(bit);
+		class = classes[i];
+		if (class) {
+			int j;
+			for (j = 0; j < AUDIT_BITMASK_SIZE; j++)
+				entry->rule.mask[j] |= class[j];
+		}
+	}
+
 	return entry;
 
 exit_err:
@@ -469,11 +509,16 @@
 		case AUDIT_ARG2:
 		case AUDIT_ARG3:
 			break;
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
 			str = audit_unpack_string(&bufp, &remain, f->val);
 			if (IS_ERR(str))
 				goto exit_free;
@@ -511,6 +556,16 @@
 			if (err)
 				goto exit_free;
 			break;
+		case AUDIT_FILTERKEY:
+			err = -EINVAL;
+			if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
+				goto exit_free;
+			str = audit_unpack_string(&bufp, &remain, f->val);
+			if (IS_ERR(str))
+				goto exit_free;
+			entry->rule.buflen += f->val;
+			entry->rule.filterkey = str;
+			break;
 		default:
 			goto exit_free;
 		}
@@ -600,11 +655,16 @@
 		data->fields[i] = f->type;
 		data->fieldflags[i] = f->op;
 		switch(f->type) {
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
 			data->buflen += data->values[i] =
 				audit_pack_string(&bufp, f->se_str);
 			break;
@@ -612,6 +672,10 @@
 			data->buflen += data->values[i] =
 				audit_pack_string(&bufp, krule->watch->path);
 			break;
+		case AUDIT_FILTERKEY:
+			data->buflen += data->values[i] =
+				audit_pack_string(&bufp, krule->filterkey);
+			break;
 		default:
 			data->values[i] = f->val;
 		}
@@ -639,11 +703,16 @@
 			return 1;
 
 		switch(a->fields[i].type) {
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
 			if (strcmp(a->fields[i].se_str, b->fields[i].se_str))
 				return 1;
 			break;
@@ -651,6 +720,11 @@
 			if (strcmp(a->watch->path, b->watch->path))
 				return 1;
 			break;
+		case AUDIT_FILTERKEY:
+			/* both filterkeys exist based on above type compare */
+			if (strcmp(a->filterkey, b->filterkey))
+				return 1;
+			break;
 		default:
 			if (a->fields[i].val != b->fields[i].val)
 				return 1;
@@ -730,6 +804,7 @@
 	u32 fcount = old->field_count;
 	struct audit_entry *entry;
 	struct audit_krule *new;
+	char *fk;
 	int i, err = 0;
 
 	entry = audit_init_entry(fcount);
@@ -753,13 +828,25 @@
 	 * the originals will all be freed when the old rule is freed. */
 	for (i = 0; i < fcount; i++) {
 		switch (new->fields[i].type) {
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
 			err = audit_dupe_selinux_field(&new->fields[i],
 						       &old->fields[i]);
+			break;
+		case AUDIT_FILTERKEY:
+			fk = kstrdup(old->filterkey, GFP_KERNEL);
+			if (unlikely(!fk))
+				err = -ENOMEM;
+			else
+				new->filterkey = fk;
 		}
 		if (err) {
 			audit_free_rule(entry);
@@ -1245,6 +1332,34 @@
 		skb_queue_tail(q, skb);
 }
 
+/* Log rule additions and removals */
+static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
+				  struct audit_krule *rule, int res)
+{
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+	if (!ab)
+		return;
+	audit_log_format(ab, "auid=%u", loginuid);
+	if (sid) {
+		char *ctx = NULL;
+		u32 len;
+		if (selinux_ctxid_to_string(sid, &ctx, &len))
+			audit_log_format(ab, " ssid=%u", sid);
+		else
+			audit_log_format(ab, " subj=%s", ctx);
+		kfree(ctx);
+	}
+	audit_log_format(ab, " %s rule key=", action);
+	if (rule->filterkey)
+		audit_log_untrustedstring(ab, rule->filterkey);
+	else
+		audit_log_format(ab, "(null)");
+	audit_log_format(ab, " list=%d res=%d", rule->listnr, res);
+	audit_log_end(ab);
+}
+
 /**
  * audit_receive_filter - apply all rules to the specified message type
  * @type: audit message type
@@ -1304,24 +1419,7 @@
 
 		err = audit_add_rule(entry,
 				     &audit_filter_list[entry->rule.listnr]);
-
-		if (sid) {
-			char *ctx = NULL;
-			u32 len;
-			if (selinux_ctxid_to_string(sid, &ctx, &len)) {
-				/* Maybe call audit_panic? */
-				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				 "auid=%u ssid=%u add rule to list=%d res=%d",
-				 loginuid, sid, entry->rule.listnr, !err);
-			} else
-				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				 "auid=%u subj=%s add rule to list=%d res=%d",
-				 loginuid, ctx, entry->rule.listnr, !err);
-			kfree(ctx);
-		} else
-			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				"auid=%u add rule to list=%d res=%d",
-				loginuid, entry->rule.listnr, !err);
+		audit_log_rule_change(loginuid, sid, "add", &entry->rule, !err);
 
 		if (err)
 			audit_free_rule(entry);
@@ -1337,24 +1435,8 @@
 
 		err = audit_del_rule(entry,
 				     &audit_filter_list[entry->rule.listnr]);
-
-		if (sid) {
-			char *ctx = NULL;
-			u32 len;
-			if (selinux_ctxid_to_string(sid, &ctx, &len)) {
-				/* Maybe call audit_panic? */
-				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-					"auid=%u ssid=%u remove rule from list=%d res=%d",
-					 loginuid, sid, entry->rule.listnr, !err);
-			} else
-				audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-					"auid=%u subj=%s remove rule from list=%d res=%d",
-					 loginuid, ctx, entry->rule.listnr, !err);
-			kfree(ctx);
-		} else
-			audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
-				"auid=%u remove rule from list=%d res=%d",
-				loginuid, entry->rule.listnr, !err);
+		audit_log_rule_change(loginuid, sid, "remove", &entry->rule,
+				      !err);
 
 		audit_free_rule(entry);
 		break;
@@ -1514,11 +1596,16 @@
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &rule->fields[i];
 		switch (f->type) {
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
 			return 1;
 		}
 	}
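
The auditfilter.c hunks above do three things: the SELinux field constants are renamed and extended from subject-only (AUDIT_SUBJ_*) to subject-plus-object (AUDIT_OBJ_*), rules gain an AUDIT_FILTERKEY string that administrators can use to tag the records a rule produces, and the three copy-pasted audit_log() blocks for rule add/remove collapse into audit_log_rule_change(). For orientation, here is a hypothetical userspace helper that would attach such a key when sending AUDIT_ADD_RULE over the audit netlink socket. The audit_rule_data layout (string length in values[], bytes appended to buf[]) is what the kernel side unpacks; the helper itself and its name are illustrative, not part of this patch:

	#include <linux/audit.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical: build an always-audit exit rule carrying a
	 * filter key; the syscall mask is left empty for brevity. */
	static struct audit_rule_data *rule_with_key(const char *key)
	{
		size_t klen = strlen(key);
		struct audit_rule_data *r = calloc(1, sizeof(*r) + klen);

		if (!r)
			return NULL;
		r->flags = AUDIT_FILTER_EXIT;
		r->action = AUDIT_ALWAYS;
		r->field_count = 1;
		r->fields[0] = AUDIT_FILTERKEY;
		r->fieldflags[0] = AUDIT_EQUAL;
		r->values[0] = klen;		/* string length here...  */
		memcpy(r->buf, key, klen);	/* ...bytes follow in buf */
		r->buflen = klen;
		return r;
	}
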
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index dc5e3f0..ae40ac8 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -186,6 +186,7 @@
 	int		    auditable;  /* 1 if record should be written */
 	int		    name_count;
 	struct audit_names  names[AUDIT_NAMES];
+	char *		    filterkey;	/* key for rule that triggered record */
 	struct dentry *	    pwd;
 	struct vfsmount *   pwdmnt;
 	struct audit_context *previous; /* For nested syscalls */
@@ -320,11 +321,11 @@
 			if (ctx)
 				result = audit_comparator(ctx->loginuid, f->op, f->val);
 			break;
-		case AUDIT_SE_USER:
-		case AUDIT_SE_ROLE:
-		case AUDIT_SE_TYPE:
-		case AUDIT_SE_SEN:
-		case AUDIT_SE_CLR:
+		case AUDIT_SUBJ_USER:
+		case AUDIT_SUBJ_ROLE:
+		case AUDIT_SUBJ_TYPE:
+		case AUDIT_SUBJ_SEN:
+		case AUDIT_SUBJ_CLR:
 			/* NOTE: this may return negative values indicating
 			   a temporary error.  We simply treat this as a
 			   match for now to avoid losing information that
@@ -341,6 +342,46 @@
 				                                  ctx);
 			}
 			break;
+		case AUDIT_OBJ_USER:
+		case AUDIT_OBJ_ROLE:
+		case AUDIT_OBJ_TYPE:
+		case AUDIT_OBJ_LEV_LOW:
+		case AUDIT_OBJ_LEV_HIGH:
+			/* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
+			   also applies here */
+			if (f->se_rule) {
+				/* Find files that match */
+				if (name) {
+					result = selinux_audit_rule_match(
+					           name->osid, f->type, f->op,
+					           f->se_rule, ctx);
+				} else if (ctx) {
+					for (j = 0; j < ctx->name_count; j++) {
+						if (selinux_audit_rule_match(
+						      ctx->names[j].osid,
+						      f->type, f->op,
+						      f->se_rule, ctx)) {
+							++result;
+							break;
+						}
+					}
+				}
+				/* Find ipc objects that match */
+				if (ctx) {
+					struct audit_aux_data *aux;
+					for (aux = ctx->aux; aux;
+					     aux = aux->next) {
+						if (aux->type == AUDIT_IPC) {
+							struct audit_aux_data_ipcctl *axi = (void *)aux;
+							if (selinux_audit_rule_match(axi->osid, f->type, f->op, f->se_rule, ctx)) {
+								++result;
+								break;
+							}
+						}
+					}
+				}
+			}
+			break;
 		case AUDIT_ARG0:
 		case AUDIT_ARG1:
 		case AUDIT_ARG2:
@@ -348,11 +389,17 @@
 			if (ctx)
 				result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);
 			break;
+		case AUDIT_FILTERKEY:
+			/* ignore this field for filtering */
+			result = 1;
+			break;
 		}
 
 		if (!result)
 			return 0;
 	}
+	if (rule->filterkey)
+		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
 	switch (rule->action) {
 	case AUDIT_NEVER:    *state = AUDIT_DISABLED;	    break;
 	case AUDIT_ALWAYS:   *state = AUDIT_RECORD_CONTEXT; break;
@@ -627,6 +674,7 @@
 		}
 		audit_free_names(context);
 		audit_free_aux(context);
+		kfree(context->filterkey);
 		kfree(context);
 		context  = previous;
 	} while (context);
@@ -735,6 +783,11 @@
 		  context->euid, context->suid, context->fsuid,
 		  context->egid, context->sgid, context->fsgid, tty);
 	audit_log_task_info(ab, tsk);
+	if (context->filterkey) {
+		audit_log_format(ab, " key=");
+		audit_log_untrustedstring(ab, context->filterkey);
+	} else
+		audit_log_format(ab, " key=(null)");
 	audit_log_end(ab);
 
 	for (aux = context->aux; aux; aux = aux->next) {
@@ -1060,6 +1113,8 @@
 	} else {
 		audit_free_names(context);
 		audit_free_aux(context);
+		kfree(context->filterkey);
+		context->filterkey = NULL;
 		tsk->audit_context = context;
 	}
 }
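
On the auditsc.c side, the rule's key is duplicated into the audit_context at match time with GFP_ATOMIC (filter matching can run in atomic context), logged with the syscall record, and freed when the context is freed or recycled. A minimal sketch of that ownership rule follows; it is illustrative only, and slightly more defensive than the inline code above, which simply overwrites the pointer:

	#include <linux/audit.h>
	#include <linux/slab.h>

	/* Sketch; struct audit_context is local to auditsc.c.  The krule
	 * keeps the canonical string and a matching context takes a
	 * private copy, so deleting the rule can never leave an in-flight
	 * record with a dangling pointer.  If kstrdup fails, the record
	 * falls back to the key=(null) output above. */
	static void context_take_key(struct audit_context *ctx,
				     const struct audit_krule *rule)
	{
		if (!rule->filterkey)
			return;
		kfree(ctx->filterkey);	/* drop a previous match's copy */
		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
	}
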
diff --git a/kernel/futex.c b/kernel/futex.c
index 6c91f93..15caf93 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -630,8 +630,10 @@
 
 	list_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key)) {
-			if (this->pi_state)
-				return -EINVAL;
+			if (this->pi_state) {
+				ret = -EINVAL;
+				break;
+			}
 			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
@@ -1208,7 +1210,7 @@
 	}
 
 	down_read(&curr->mm->mmap_sem);
-	hb = queue_lock(&q, -1, NULL);
+	spin_lock(q.lock_ptr);
 
 	/*
 	 * Got the lock. We might not be the anticipated owner if we
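
The futex_wake() hunk is a lock-balance fix: the wait-list walk runs with the hash-bucket spinlock held (and mmap_sem taken), so the old "return -EINVAL" bailed out without releasing either; stashing the error and breaking lets the single unlock path at the end of the function run. The second hunk retakes the existing queue spinlock directly instead of re-running queue_lock(). The first pattern in isolation, with stand-in types:

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct waiter {
		struct list_head list;
		int pi_state;	/* stand-in for the disallowed state */
	};

	/* Sketch: one exit path, so the unlock always runs. */
	static int wake_matching(spinlock_t *lock, struct list_head *head)
	{
		struct waiter *w, *next;
		int ret = 0;

		spin_lock(lock);
		list_for_each_entry_safe(w, next, head, list) {
			if (w->pi_state) {
				ret = -EINVAL;	/* was: return -EINVAL */
				break;
			}
			list_del_init(&w->list);
			ret++;
		}
		spin_unlock(lock);
		return ret;
	}
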
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b7117e8..fcce518 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -114,7 +114,7 @@
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 0:
-		printk(KERN_WARNING "Unablanced enable_irq(%d)\n", irq);
+		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		WARN_ON(1);
 		break;
 	case 1: {
@@ -236,7 +236,8 @@
 
 #if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
 		/* All handlers must agree on per-cpuness */
-		if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
+		if ((old->flags & SA_PERCPU_IRQ) !=
+		    (new->flags & SA_PERCPU_IRQ))
 			goto mismatch;
 #endif
 
@@ -266,9 +267,10 @@
 				 * SA_TRIGGER_* but the PIC does not support
 				 * multiple flow-types?
 				 */
-				printk(KERN_WARNING "setup_irq(%d) SA_TRIGGER"
-				       "set. No set_type function available\n",
-				       irq);
+				printk(KERN_WARNING "No SA_TRIGGER set_type "
+				       "function for IRQ %d (%s)\n", irq,
+				       desc->chip ? desc->chip->name :
+				       "unknown");
 		} else
 			compat_irq_chip_set_default_handler(desc);
 
@@ -298,7 +300,7 @@
 mismatch:
 	spin_unlock_irqrestore(&desc->lock, flags);
 	if (!(new->flags & SA_PROBEIRQ)) {
-		printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
 		dump_stack();
 	}
 	return -EBUSY;
@@ -365,7 +367,7 @@
 			kfree(action);
 			return;
 		}
-		printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
+		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
 		spin_unlock_irqrestore(&desc->lock, flags);
 		return;
 	}
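
Beyond the message wording, the setup_irq() hunk fixes the shared-handler agreement check: irqaction->flags carries SA_PERCPU_IRQ, while IRQ_PER_CPU is a descriptor-status bit, so masking the action flags with IRQ_PER_CPU compared the wrong bit on both sides. Reduced to the essential idea (names illustrative):

	#include <linux/errno.h>

	/* Sketch: two flag words agree on a property only if the same
	 * mask is applied to both before comparing. */
	static int flags_agree(unsigned long oldf, unsigned long newf,
			       unsigned long mask)
	{
		return ((oldf ^ newf) & mask) ? -EBUSY : 0;
	}
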
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index b0d067b..2180c88 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -13,6 +13,10 @@
 depfile = $(subst $(comma),_,$(@D)/.$(@F).d)
 
 ###
+# filename of target with directory and extension stripped
+basetarget = $(basename $(notdir $@))
+
+###
 # Escape single quote for use in echo statements
 escsq = $(subst $(squote),'\$(squote)',$1)
 
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 02a7eea..3cb445c 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -117,7 +117,7 @@
 $(obj-m)              : quiet_modtag := [M]
 
 # Default for not multi-part modules
-modname = $(*F)
+modname = $(basetarget)
 
 $(multi-objs-m)         : modname = $(modname-multi)
 $(multi-objs-m:.o=.i)   : modname = $(modname-multi)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 2b066d1..18ecd4d 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -80,8 +80,10 @@
 #####
 # Handle options to gcc. Support building with separate output directory
 
-_hostc_flags   = $(HOSTCFLAGS)   $(HOST_EXTRACFLAGS)   $(HOSTCFLAGS_$(*F).o)
-_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) $(HOSTCXXFLAGS_$(*F).o)
+_hostc_flags   = $(HOSTCFLAGS)   $(HOST_EXTRACFLAGS)   \
+                 $(HOSTCFLAGS_$(basetarget).o)
+_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
+                 $(HOSTCXXFLAGS_$(basetarget).o)
 
 ifeq ($(KBUILD_SRC),)
 __hostc_flags	= $(_hostc_flags)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 2cb4935..fc498fe 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -82,12 +82,12 @@
 #       than one module. In that case KBUILD_MODNAME will be set to foo_bar,
 #       where foo and bar are the name of the modules.
 name-fix = $(subst $(comma),_,$(subst -,_,$1))
-basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(*F)))"
+basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
                  -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
 
-_c_flags       = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(*F).o)
-_a_flags       = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
+_c_flags       = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(basetarget).o)
+_a_flags       = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
 _cpp_flags     = $(CPPFLAGS) $(EXTRA_CPPFLAGS) $(CPPFLAGS_$(@F))
 
 # If building the kernel in a separate objtree expand all occurrences
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 576cce5..a495502 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -72,7 +72,7 @@
 # Step 5), compile all *.mod.c files
 
 # modname is set to make c_flags define KBUILD_MODNAME
-modname = $(*F)
+modname = $(notdir $(@:.mod.o=))
 
 quiet_cmd_cc_o_c = CC      $@
       cmd_cc_o_c = $(CC) $(c_flags) $(CFLAGS_MODULE)	\
diff --git a/scripts/kconfig/lxdialog/checklist.c b/scripts/kconfig/lxdialog/checklist.c
index be0200e..7988641 100644
--- a/scripts/kconfig/lxdialog/checklist.c
+++ b/scripts/kconfig/lxdialog/checklist.c
@@ -187,9 +187,12 @@
 
 	/* Print the list */
 	for (i = 0; i < max_choice; i++) {
-		print_item(list, items[(scroll + i) * 3 + 1],
-			   status[i + scroll], i, i == choice);
+		if (i != choice)
+			print_item(list, items[(scroll + i) * 3 + 1],
+				   status[i + scroll], i, 0);
 	}
+	print_item(list, items[(scroll + choice) * 3 + 1],
+		   status[choice + scroll], choice, 1);
 
 	print_arrows(dialog, choice, item_no, scroll,
 		     box_y, box_x + check_x + 5, list_height);
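
The lxdialog change redraws the unselected entries first and the highlighted one last; curses leaves the cursor where the last draw ended, so this parks it on the active row, apparently for the benefit of braille terminals and screen readers. The structure, with print_item() abstracted behind a stand-in callback:

	/* Sketch of the draw order above; draw_row stands in for
	 * print_item(). */
	static void draw_list(int n, int choice,
			      void (*draw_row)(int idx, int selected))
	{
		int i;

		for (i = 0; i < n; i++)
			if (i != choice)
				draw_row(i, 0);
		draw_row(choice, 1);	/* drawn last: cursor stays here */
	}
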
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 00e2129..f9460a6 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1056,7 +1056,8 @@
 	    # pointer-to-function
 	    print ".BI \"    ".$1."\" ".$parameter." \") (".$2.")"."\"\n;\n";
 	} elsif ($type =~ m/^(.*?)\s*(:.*)/) {
-	    print ".BI \"    ".$1."\" ".$parameter.$2." \""."\"\n;\n";
+	    # bitfield
+	    print ".BI \"    ".$1."\ \" ".$parameter.$2." \""."\"\n;\n";
 	} else {
 	    $type =~ s/([^\*])$/$1 /;
 	    print ".BI \"    ".$type."\" ".$parameter." \""."\"\n;\n";
@@ -1118,7 +1119,10 @@
     my %args = %{$_[0]};
     my ($parameter, $section);
 
-    print "Function:\n\n";
+    print "Name:\n\n";
+    print $args{'function'}." - ".$args{'purpose'}."\n";
+
+    print "\nSynopsis:\n\n";
     my $start=$args{'functiontype'}." ".$args{'function'}." (";
     print $start;
     my $count = 0;
@@ -1169,6 +1173,7 @@
     my $count;
     print "Enum:\n\n";
 
+    print "enum ".$args{'enum'}." - ".$args{'purpose'}."\n\n";
     print "enum ".$args{'enum'}." {\n";
     $count = 0;
     foreach $parameter (@{$args{'parameterlist'}}) {
@@ -1197,7 +1202,7 @@
     my $count;
     print "Typedef:\n\n";
 
-    print "typedef ".$args{'typedef'}."\n";
+    print "typedef ".$args{'typedef'}." - ".$args{'purpose'}."\n";
     output_section_text(@_);
 }
 
@@ -1206,7 +1211,7 @@
     my %args = %{$_[0]};
     my ($parameter);
 
-    print $args{'type'}." ".$args{'struct'}.":\n\n";
+    print $args{'type'}." ".$args{'struct'}." - ".$args{'purpose'}."\n\n";
     print $args{'type'}." ".$args{'struct'}." {\n";
     foreach $parameter (@{$args{'parameterlist'}}) {
 	if ($parameter =~ /^#/) {
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0dd1617..dfde0e8 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -24,7 +24,10 @@
 /* If we are modposting external module set to 1 */
 static int external_module = 0;
 /* How a symbol is exported */
-enum export {export_plain, export_gpl, export_gpl_future, export_unknown};
+enum export {
+	export_plain,      export_unused,     export_gpl,
+	export_unused_gpl, export_gpl_future, export_unknown
+};
 
 void fatal(const char *fmt, ...)
 {
@@ -191,7 +194,9 @@
 	enum export export;
 } export_list[] = {
 	{ .str = "EXPORT_SYMBOL",            .export = export_plain },
+	{ .str = "EXPORT_UNUSED_SYMBOL",     .export = export_unused },
 	{ .str = "EXPORT_SYMBOL_GPL",        .export = export_gpl },
+	{ .str = "EXPORT_UNUSED_SYMBOL_GPL", .export = export_unused_gpl },
 	{ .str = "EXPORT_SYMBOL_GPL_FUTURE", .export = export_gpl_future },
 	{ .str = "(unknown)",                .export = export_unknown },
 };
@@ -205,6 +210,8 @@
 static enum export export_no(const char * s)
 {
 	int i;
+	if (!s)
+		return export_unknown;
 	for (i = 0; export_list[i].export != export_unknown; i++) {
 		if (strcmp(export_list[i].str, s) == 0)
 			return export_list[i].export;
@@ -216,8 +223,12 @@
 {
 	if (sec == elf->export_sec)
 		return export_plain;
+	else if (sec == elf->export_unused_sec)
+		return export_unused;
 	else if (sec == elf->export_gpl_sec)
 		return export_gpl;
+	else if (sec == elf->export_unused_gpl_sec)
+		return export_unused_gpl;
 	else if (sec == elf->export_gpl_future_sec)
 		return export_gpl_future;
 	else
@@ -366,8 +377,12 @@
 			info->modinfo_len = sechdrs[i].sh_size;
 		} else if (strcmp(secname, "__ksymtab") == 0)
 			info->export_sec = i;
+		else if (strcmp(secname, "__ksymtab_unused") == 0)
+			info->export_unused_sec = i;
 		else if (strcmp(secname, "__ksymtab_gpl") == 0)
 			info->export_gpl_sec = i;
+		else if (strcmp(secname, "__ksymtab_unused_gpl") == 0)
+			info->export_unused_gpl_sec = i;
 		else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
 			info->export_gpl_future_sec = i;
 
@@ -1085,38 +1100,64 @@
 	buf->pos += len;
 }
 
-void check_license(struct module *mod)
+static void check_for_gpl_usage(enum export exp, const char *m, const char *s)
+{
+	const char *e = is_vmlinux(m) ?"":".ko";
+
+	switch (exp) {
+	case export_gpl:
+		fatal("modpost: GPL-incompatible module %s%s "
+		      "uses GPL-only symbol '%s'\n", m, e, s);
+		break;
+	case export_unused_gpl:
+		fatal("modpost: GPL-incompatible module %s%s "
+		      "uses GPL-only symbol marked UNUSED '%s'\n", m, e, s);
+		break;
+	case export_gpl_future:
+		warn("modpost: GPL-incompatible module %s%s "
+		      "uses future GPL-only symbol '%s'\n", m, e, s);
+		break;
+	case export_plain:
+	case export_unused:
+	case export_unknown:
+		/* ignore */
+		break;
+	}
+}
+
+static void check_for_unused(enum export exp, const char* m, const char* s)
+{
+	const char *e = is_vmlinux(m) ?"":".ko";
+
+	switch (exp) {
+	case export_unused:
+	case export_unused_gpl:
+		warn("modpost: module %s%s "
+		      "uses symbol '%s' marked UNUSED\n", m, e, s);
+		break;
+	default:
+		/* ignore */
+		break;
+	}
+}
+
+static void check_exports(struct module *mod)
 {
 	struct symbol *s, *exp;
 
 	for (s = mod->unres; s; s = s->next) {
 		const char *basename;
-		if (mod->gpl_compatible == 1) {
-			/* GPL-compatible modules may use all symbols */
-			continue;
-		}
 		exp = find_symbol(s->name);
 		if (!exp || exp->module == mod)
 			continue;
 		basename = strrchr(mod->name, '/');
 		if (basename)
 			basename++;
-		switch (exp->export) {
-			case export_gpl:
-				fatal("modpost: GPL-incompatible module %s "
-				      "uses GPL-only symbol '%s'\n",
-				 basename ? basename : mod->name,
-				exp->name);
-				break;
-			case export_gpl_future:
-				warn("modpost: GPL-incompatible module %s "
-				      "uses future GPL-only symbol '%s'\n",
-				      basename ? basename : mod->name,
-				      exp->name);
-				break;
-			case export_plain: /* ignore */ break;
-			case export_unknown: /* ignore */ break;
-		}
+		else
+			basename = mod->name;
+		if (!mod->gpl_compatible)
+			check_for_gpl_usage(exp->export, basename, exp->name);
+		check_for_unused(exp->export, basename, exp->name);
         }
 }
 
@@ -1271,7 +1312,7 @@
 }
 
 /* parse Module.symvers file. line format:
- * 0x12345678<tab>symbol<tab>module[<tab>export]
+ * 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
  **/
 static void read_dump(const char *fname, unsigned int kernel)
 {
@@ -1284,7 +1325,7 @@
 		return;
 
 	while ((line = get_next_line(&pos, file, size))) {
-		char *symname, *modname, *d, *export;
+		char *symname, *modname, *d, *export, *end;
 		unsigned int crc;
 		struct module *mod;
 		struct symbol *s;
@@ -1297,7 +1338,8 @@
 		*modname++ = '\0';
 		if ((export = strchr(modname, '\t')) != NULL)
 			*export++ = '\0';
-
+		if (export && ((end = strchr(export, '\t')) != NULL))
+			*end = '\0';
 		crc = strtoul(line, &d, 16);
 		if (*symname == '\0' || *modname == '\0' || *d != '\0')
 			goto fail;
@@ -1396,7 +1438,7 @@
 	for (mod = modules; mod; mod = mod->next) {
 		if (mod->skip)
 			continue;
-		check_license(mod);
+		check_exports(mod);
 	}
 
 	for (mod = modules; mod; mod = mod->next) {
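
Taken together, the modpost.c hunks wire up the new EXPORT_UNUSED_SYMBOL{,_GPL} classes: two additional __ksymtab sections are recognized, and check_license() becomes check_exports() so that the GPL-compatibility check and the new "uses a symbol marked UNUSED" warning run independently; the old code skipped all checking for GPL-compatible modules. read_dump() also learns to ignore one further tab-separated column in Module.symvers. A rough standalone sketch of that tolerant parse (names hypothetical; modpost uses its own line helpers rather than strtok):

	#include <string.h>

	/* crc, symbol and module are required; an export class may
	 * follow; anything after a further tab is ignored. */
	static int parse_symvers_line(char *line, char **crc, char **sym,
				      char **mod, char **export)
	{
		*crc = strtok(line, "\t\n");
		*sym = strtok(NULL, "\t\n");
		*mod = strtok(NULL, "\t\n");
		*export = strtok(NULL, "\t\n");	/* optional */
		return (*crc && *sym && *mod) ? 0 : -1;
	}
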
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 2b00c60..d398c61 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -117,7 +117,9 @@
 	Elf_Sym      *symtab_start;
 	Elf_Sym      *symtab_stop;
 	Elf_Section  export_sec;
+	Elf_Section  export_unused_sec;
 	Elf_Section  export_gpl_sec;
+	Elf_Section  export_unused_gpl_sec;
 	Elf_Section  export_gpl_future_sec;
 	const char   *strtab;
 	char	     *modinfo;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index e9548bc..d2e80e6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1845,15 +1845,20 @@
 		return -ENOTSUPP;
 
 	switch (field) {
-	case AUDIT_SE_USER:
-	case AUDIT_SE_ROLE:
-	case AUDIT_SE_TYPE:
+	case AUDIT_SUBJ_USER:
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_USER:
+	case AUDIT_OBJ_ROLE:
+	case AUDIT_OBJ_TYPE:
 		/* only 'equals' and 'not equals' fit user, role, and type */
 		if (op != AUDIT_EQUAL && op != AUDIT_NOT_EQUAL)
 			return -EINVAL;
 		break;
-	case AUDIT_SE_SEN:
-	case AUDIT_SE_CLR:
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
 		/* we do not allow a range, indicated by the presence of '-' */
 		if (strchr(rulestr, '-'))
 			return -EINVAL;
@@ -1874,29 +1879,34 @@
 	tmprule->au_seqno = latest_granting;
 
 	switch (field) {
-	case AUDIT_SE_USER:
+	case AUDIT_SUBJ_USER:
+	case AUDIT_OBJ_USER:
 		userdatum = hashtab_search(policydb.p_users.table, rulestr);
 		if (!userdatum)
 			rc = -EINVAL;
 		else
 			tmprule->au_ctxt.user = userdatum->value;
 		break;
-	case AUDIT_SE_ROLE:
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_OBJ_ROLE:
 		roledatum = hashtab_search(policydb.p_roles.table, rulestr);
 		if (!roledatum)
 			rc = -EINVAL;
 		else
 			tmprule->au_ctxt.role = roledatum->value;
 		break;
-	case AUDIT_SE_TYPE:
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_TYPE:
 		typedatum = hashtab_search(policydb.p_types.table, rulestr);
 		if (!typedatum)
 			rc = -EINVAL;
 		else
 			tmprule->au_ctxt.type = typedatum->value;
 		break;
-	case AUDIT_SE_SEN:
-	case AUDIT_SE_CLR:
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
 		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
 		break;
 	}
@@ -1948,7 +1958,8 @@
 	/* a field/op pair that is not caught here will simply fall through
 	   without a match */
 	switch (field) {
-	case AUDIT_SE_USER:
+	case AUDIT_SUBJ_USER:
+	case AUDIT_OBJ_USER:
 		switch (op) {
 		case AUDIT_EQUAL:
 			match = (ctxt->user == rule->au_ctxt.user);
@@ -1958,7 +1969,8 @@
 			break;
 		}
 		break;
-	case AUDIT_SE_ROLE:
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_OBJ_ROLE:
 		switch (op) {
 		case AUDIT_EQUAL:
 			match = (ctxt->role == rule->au_ctxt.role);
@@ -1968,7 +1980,8 @@
 			break;
 		}
 		break;
-	case AUDIT_SE_TYPE:
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_OBJ_TYPE:
 		switch (op) {
 		case AUDIT_EQUAL:
 			match = (ctxt->type == rule->au_ctxt.type);
@@ -1978,9 +1991,12 @@
 			break;
 		}
 		break;
-	case AUDIT_SE_SEN:
-	case AUDIT_SE_CLR:
-		level = (field == AUDIT_SE_SEN ?
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
+		level = ((field == AUDIT_SUBJ_SEN ||
+		          field == AUDIT_OBJ_LEV_LOW) ?
 		         &ctxt->range.level[0] : &ctxt->range.level[1]);
 		switch (op) {
 		case AUDIT_EQUAL: