Merge branch 'slabh' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc

* 'slabh' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc:
  eeepc-wmi: include slab.h
  staging/otus: include slab.h from usbdrv.h
  percpu: don't implicitly include slab.h from percpu.h
  kmemcheck: Fix build errors due to missing slab.h
  include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
  iwlwifi: don't include iwl-dev.h from iwl-devtrace.h
  x86: don't include slab.h from arch/x86/include/asm/pgtable_32.h

Fix up trivial conflicts in include/linux/percpu.h, caused by
is_kernel_percpu_address() having been introduced (as part of the
percpu_up.c split-up) after the slab.h cleanup branched off.
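
A note on what the include cleanup means for callers: percpu.h users can
no longer count on picking up slab.h (and gfp.h through it) implicitly.
As an illustration (hypothetical file and function, not part of this
merge), code like the following used to build only because
<linux/percpu.h> dragged in <linux/slab.h>, and now needs the explicit
include:

    #include <linux/percpu.h>
    #include <linux/slab.h>         /* kmalloc()/kfree(): now explicit */

    static int example_init(void)
    {
            void *buf = kmalloc(64, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            kfree(buf);
            return 0;
    }
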
diff --git a/include/linux/module.h b/include/linux/module.h
index 5e869ff..8bd399a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -330,8 +330,11 @@
 	struct module_notes_attrs *notes_attrs;
 #endif
 
+#ifdef CONFIG_SMP
 	/* Per-cpu data. */
-	void *percpu;
+	void __percpu *percpu;
+	unsigned int percpu_size;
+#endif
 
 	/* The command line arguments (may be mangled).  People like
 	   keeping pointers to this stuff */
@@ -392,6 +395,7 @@
 struct module *__module_text_address(unsigned long addr);
 struct module *__module_address(unsigned long addr);
 bool is_module_address(unsigned long addr);
+bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);
 
 static inline int within_module_core(unsigned long addr, struct module *mod)
@@ -563,6 +567,11 @@
 	return false;
 }
 
+static inline bool is_module_percpu_address(unsigned long addr)
+{
+	return false;
+}
+
 static inline bool is_module_text_address(unsigned long addr)
 {
 	return false;
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index c784513..d3a38d6
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -135,6 +135,7 @@
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool is_kernel_percpu_address(unsigned long addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -144,6 +145,12 @@
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
+/* can't distinguish from other static vars, always false */
+static inline bool is_kernel_percpu_address(unsigned long addr)
+{
+	return false;
+}
+
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
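
The reasoning behind the always-false UP stub added above: on !SMP,
per_cpu_ptr() collapses to the plain pointer, so a static percpu
variable occupies ordinary static storage and there is no distinct
address range to test.  A hypothetical sketch (names illustrative only):

    /* !SMP: this is plain static data; its address looks like any
     * other static variable's, so the stub can only return false. */
    static DEFINE_PER_CPU(int, hypothetical_counter);

    static void up_illustration(void)
    {
            int *p = per_cpu_ptr(&hypothetical_counter, 0);

            /* On UP, p == &hypothetical_counter, and
             * is_kernel_percpu_address((unsigned long)p) is false. */
            (void)p;
    }
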
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 367f724..2594e1c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -583,9 +583,6 @@
 	unsigned long start = (unsigned long) &_stext,
 		      end   = (unsigned long) &_end,
 		      addr  = (unsigned long) obj;
-#ifdef CONFIG_SMP
-	int i;
-#endif
 
 	/*
 	 * static variable?
@@ -596,24 +593,16 @@
 	if (arch_is_kernel_data(addr))
 		return 1;
 
-#ifdef CONFIG_SMP
 	/*
-	 * percpu var?
+	 * in-kernel percpu var?
 	 */
-	for_each_possible_cpu(i) {
-		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
-					+ per_cpu_offset(i);
-
-		if ((addr >= start) && (addr < end))
-			return 1;
-	}
-#endif
+	if (is_kernel_percpu_address(addr))
+		return 1;
 
 	/*
-	 * module var?
+	 * module static or percpu var?
 	 */
-	return is_module_address(addr);
+	return is_module_address(addr) || is_module_percpu_address(addr);
 }
 
 /*
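
Putting the lockdep hunks together, static_obj() after this change reads
roughly as follows.  This is a reconstruction, not new code; the bounds
test between the declarations and the arch_is_kernel_data() check is
unchanged context that the diff does not show.

    static int static_obj(void *obj)
    {
            unsigned long start = (unsigned long) &_stext,
                          end   = (unsigned long) &_end,
                          addr  = (unsigned long) obj;

            /*
             * static variable?
             */
            if ((addr >= start) && (addr < end))
                    return 1;

            if (arch_is_kernel_data(addr))
                    return 1;

            /*
             * in-kernel percpu var?
             */
            if (is_kernel_percpu_address(addr))
                    return 1;

            /*
             * module static or percpu var?
             */
            return is_module_address(addr) || is_module_percpu_address(addr);
    }
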
diff --git a/kernel/module.c b/kernel/module.c
index c968d36..9f8d23d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,27 +370,33 @@
 
 #ifdef CONFIG_SMP
 
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
-	void *ptr;
+	return mod->percpu;
+}
 
+static int percpu_modalloc(struct module *mod,
+			   unsigned long size, unsigned long align)
+{
 	if (align > PAGE_SIZE) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
+		       mod->name, align, PAGE_SIZE);
 		align = PAGE_SIZE;
 	}
 
-	ptr = __alloc_reserved_percpu(size, align);
-	if (!ptr)
+	mod->percpu = __alloc_reserved_percpu(size, align);
+	if (!mod->percpu) {
 		printk(KERN_WARNING
 		       "Could not allocate %lu bytes percpu data\n", size);
-	return ptr;
+		return -ENOMEM;
+	}
+	mod->percpu_size = size;
+	return 0;
 }
 
-static void percpu_modfree(void *freeme)
+static void percpu_modfree(struct module *mod)
 {
-	free_percpu(freeme);
+	free_percpu(mod->percpu);
 }
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
@@ -400,24 +406,62 @@
 	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
 }
 
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+static void percpu_modcopy(struct module *mod,
+			   const void *from, unsigned long size)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
+}
+
+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area, %false otherwise.
+ */
+bool is_module_percpu_address(unsigned long addr)
+{
+	struct module *mod;
+	unsigned int cpu;
+
+	preempt_disable();
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (!mod->percpu_size)
+			continue;
+		for_each_possible_cpu(cpu) {
+			void *start = per_cpu_ptr(mod->percpu, cpu);
+
+			if ((void *)addr >= start &&
+			    (void *)addr < start + mod->percpu_size) {
+				preempt_enable();
+				return true;
+			}
+		}
+	}
+
+	preempt_enable();
+	return false;
 }
 
 #else /* ... !CONFIG_SMP */
 
-static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-				    const char *name)
+static inline void __percpu *mod_percpu(struct module *mod)
 {
 	return NULL;
 }
-static inline void percpu_modfree(void *pcpuptr)
+static inline int percpu_modalloc(struct module *mod,
+				  unsigned long size, unsigned long align)
 {
-	BUG();
+	return -ENOMEM;
+}
+static inline void percpu_modfree(struct module *mod)
+{
 }
 static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
 					Elf_Shdr *sechdrs,
@@ -425,12 +469,16 @@
 {
 	return 0;
 }
-static inline void percpu_modcopy(void *pcpudst, const void *src,
-				  unsigned long size)
+static inline void percpu_modcopy(struct module *mod,
+				  const void *from, unsigned long size)
 {
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+bool is_module_percpu_address(unsigned long addr)
+{
+	return false;
+}
 
 #endif /* CONFIG_SMP */
 
@@ -1400,8 +1448,7 @@
 	/* This may be NULL, but that's OK */
 	module_free(mod, mod->module_init);
 	kfree(mod->args);
-	if (mod->percpu)
-		percpu_modfree(mod->percpu);
+	percpu_modfree(mod);
 #if defined(CONFIG_MODULE_UNLOAD)
 	if (mod->refptr)
 		free_percpu(mod->refptr);
@@ -1520,7 +1567,7 @@
 		default:
 			/* Divert to percpu allocation if a percpu var. */
 			if (sym[i].st_shndx == pcpuindex)
-				secbase = (unsigned long)mod->percpu;
+				secbase = (unsigned long)mod_percpu(mod);
 			else
 				secbase = sechdrs[sym[i].st_shndx].sh_addr;
 			sym[i].st_value += secbase;
@@ -1954,7 +2001,7 @@
 	unsigned int modindex, versindex, infoindex, pcpuindex;
 	struct module *mod;
 	long err = 0;
-	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
+	void *ptr = NULL; /* Stops spurious gcc warning */
 	unsigned long symoffs, stroffs, *strmap;
 
 	mm_segment_t old_fs;
@@ -2094,15 +2141,11 @@
 
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
-		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-					 sechdrs[pcpuindex].sh_addralign,
-					 mod->name);
-		if (!percpu) {
-			err = -ENOMEM;
+		err = percpu_modalloc(mod, sechdrs[pcpuindex].sh_size,
+				      sechdrs[pcpuindex].sh_addralign);
+		if (err)
 			goto free_mod;
-		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
-		mod->percpu = percpu;
 	}
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
@@ -2317,7 +2360,7 @@
 	sort_extable(mod->extable, mod->extable + mod->num_exentries);
 
 	/* Finally, copy percpu area over. */
-	percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
+	percpu_modcopy(mod, (void *)sechdrs[pcpuindex].sh_addr,
 		       sechdrs[pcpuindex].sh_size);
 
 	add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
@@ -2409,8 +2452,7 @@
 	module_free(mod, mod->module_core);
 	/* mod will be freed with core. Don't access it beyond this line! */
  free_percpu:
-	if (percpu)
-		percpu_modfree(percpu);
+	percpu_modfree(mod);
  free_mod:
 	kfree(args);
 	kfree(strmap);
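
Concretely, what the new module predicate recognizes: a module's static
percpu variables are laid out inside the mod->percpu allocation (of
mod->percpu_size bytes), so every CPU's copy of such a variable now
tests true.  A hypothetical module snippet, for illustration only:

    /* Storage for this comes out of the module's reserved percpu
     * allocation made by percpu_modalloc() at load time. */
    static DEFINE_PER_CPU(unsigned long, hypothetical_stat);

    static void sanity_check(void)
    {
            unsigned long addr;

            addr = (unsigned long)per_cpu_ptr(&hypothetical_stat, 0);
            WARN_ON(!is_module_percpu_address(addr));   /* expected: true */
    }
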
diff --git a/mm/percpu.c b/mm/percpu.c
index 768419d..6e09741 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1304,6 +1304,32 @@
 EXPORT_SYMBOL_GPL(free_percpu);
 
 /**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area.  Module
+ * static percpu areas are not considered.  For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+	const size_t static_size = __per_cpu_end - __per_cpu_start;
+	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		void *start = per_cpu_ptr(base, cpu);
+
+		if ((void *)addr >= start && (void *)addr < start + static_size)
+			return true;
+	}
+	return false;
+}
+
+/**
  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
  * @addr: the address to be converted to physical address
  *
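
The "static" in the comment above matters: only addresses inside the
translated [__per_cpu_start, __per_cpu_end) window match, while memory
from alloc_percpu() sits past the static area (or in separate chunks)
and does not.  A hypothetical illustration (names are made up):

    static DEFINE_PER_CPU(long, hypothetical_static);

    static void classify(void)
    {
            long __percpu *dyn = alloc_percpu(long);

            if (!dyn)
                    return;

            /* Static copy for CPU 0: inside the window, so true. */
            WARN_ON(!is_kernel_percpu_address(
                            (unsigned long)per_cpu_ptr(&hypothetical_static, 0)));

            /* Dynamic percpu memory: outside the static area, so false. */
            WARN_ON(is_kernel_percpu_address(
                            (unsigned long)per_cpu_ptr(dyn, 0)));

            free_percpu(dyn);
    }
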
diff --git a/mm/rmap.c b/mm/rmap.c
index fcd593c..eaa7a09
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -232,6 +232,7 @@
  out_error_free_anon_vma:
 	anon_vma_free(anon_vma);
  out_error:
+	unlink_anon_vmas(vma);
 	return -ENOMEM;
 }