Merge branch 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'percpu-cpumask-x86-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (682 commits)
  percpu: fix spurious alignment WARN in legacy SMP percpu allocator
  percpu: generalize embedding first chunk setup helper
  percpu: more flexibility for @dyn_size of pcpu_setup_first_chunk()
  percpu: make x86 addr <-> pcpu ptr conversion macros generic
  linker script: define __per_cpu_load on all SMP capable archs
  x86: UV: remove uv_flush_tlb_others() WARN_ON
  percpu: finer grained locking to break deadlock and allow atomic free
  percpu: move fully free chunk reclamation into a work
  percpu: move chunk area map extension out of area allocation
  percpu: replace pcpu_realloc() with pcpu_mem_alloc() and pcpu_mem_free()
  x86, percpu: setup reserved percpu area for x86_64
  percpu, module: implement reserved allocation and use it for module percpu variables
  percpu: add an indirection ptr for chunk page map access
  x86: make embedding percpu allocator return excessive free space
  percpu: use negative for auto for pcpu_setup_first_chunk() arguments
  percpu: improve first chunk initial area map handling
  percpu: cosmetic renames in pcpu_setup_first_chunk()
  percpu: clean up percpu constants
  x86: un-__init fill_pud/pmd/pte
  x86: remove vestigial fix_ioremap prototypes
  ...

Manually resolved conflicts in arch/ia64/kernel/irq_ia64.c
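
The conflict resolutions below follow the reworked irq/cpumask API brought in by
the percpu branch: irq descriptors are looked up with irq_to_desc() instead of
pointer arithmetic on irq_desc[], and the affinity field is now a cpumask
pointer manipulated with the cpumask_* helpers rather than by struct
assignment.  A minimal sketch of the pattern (hypothetical caller, assuming
CONFIG_SMP; not taken verbatim from this merge):

	struct irq_desc *desc = irq_to_desc(irq);       /* was: desc = irq_desc + irq            */
	cpumask_setall(desc->affinity);                 /* was: cpus_setall(desc->affinity)      */
	cpumask_copy(desc->affinity, cpumask_of(cpu));  /* was: desc->affinity = cpumask_of_cpu(cpu) */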
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d541671..bdef2ce 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -199,6 +199,10 @@
 	return __va(phys_addr);
 }
 
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                             Boot-time Table Parsing
    -------------------------------------------------------------------------- */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index e131250..166e0d8 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 4f59661..7429752 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -103,7 +103,7 @@
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 977a6ef1..acc4d19 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,16 +493,15 @@
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		struct irq_desc *desc;
 		int irq = local_vector_to_irq(vector);
+		struct irq_desc *desc = irq_to_desc(irq);
 
-		desc = irq_desc + irq;
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
-		} else if (unlikely(IS_RESCHEDULE(vector)))
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			kstat_incr_irqs_this_cpu(irq, desc);
-		else {
+		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
 
@@ -553,16 +552,15 @@
 	  * Perform normal interrupt style processing
 	  */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		struct irq_desc *desc;
 		int irq = local_vector_to_irq(vector);
-		desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
-		} else if (unlikely(IS_RESCHEDULE(vector)))
+		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			kstat_incr_irqs_this_cpu(irq, desc);
-		else {
+		} else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 368ee4e..2b15e23 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -38,7 +38,7 @@
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -150,7 +150,7 @@
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 10a7d47e..3765efc 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -213,16 +213,9 @@
         { *(.data.cacheline_aligned) }
 
   /* Per-cpu data: */
-  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-  __phys_per_cpu_start = .;
-  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-	{
-		__per_cpu_start = .;
-		*(.data.percpu)
-		*(.data.percpu.shared_aligned)
-		__per_cpu_end = .;
-	}
+  PERCPU_VADDR(PERCPU_ADDR, :percpu)
+  __phys_per_cpu_start = __per_cpu_load;
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
   						 * into percpu page size
 						 */