/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
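/*
 * For instance, assuming the default __PAGE_OFFSET of 0xC0000000, a symbol
 * linked at 0xC1000000 has pa() == 0x01000000, i.e. the physical address it
 * occupies before paging is enabled.
 */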

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of the page size.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn; otherwise we will waste some page table entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Number of possible pages in the lowmem region */
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (the kexec-on-panic case).  Hence copy out the
 * parameters before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries).  The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
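
/*
 * For example (the value depends on the chosen VMSPLIT / __PAGE_OFFSET):
 *   __PAGE_OFFSET = 0xC0000000 (3G/1G split) -> KPMDS = 1
 *   __PAGE_OFFSET = 0x80000000 (2G/2G split) -> KPMDS = 2
 *   __PAGE_OFFSET = 0x40000000 (1G/3G split) -> KPMDS = 3
 * since each PAE PMD covers 1GB of the kernel half of the address space.
 */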

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);
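
/*
 * page_pde_offset is the byte offset of PAGE_OFFSET's PDE within the page
 * directory: e.g. with the default __PAGE_OFFSET of 0xC0000000 it is 0xC00,
 * i.e. PDE slot 768 (768 * 4 bytes).  The same entry written at (%edx) in
 * the loop below is mirrored there to build the kernel (high) mapping.
 */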

	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp default_entry
#endif /* CONFIG_PARAVIRT */

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported, this code can go in the init section,
 * which will be freed later.
 */

__CPUINIT

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(stack_start),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:

/*
 * New page tables may be in 4Mbyte page mode and may
 * be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
 * So we do not try to touch it unless we really have
 * some bits in it to set.  This won't work if the BSP
 * implements cr4 but this AP does not -- very unlikely
 * but be warned!  The same applies to the pse feature
 * if not equally supported. --macro
 *
 * NOTE! We have to correct for the fact that we're
 * not yet offset PAGE_OFFSET..
 */
#define cr4_bits pa(mmu_cr4_features)
	movl cr4_bits,%edx
	andl %edx,%edx
	jz 6f
	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
	orl %edx,%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz 6f

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja 6f
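	/*
	 * The subl/cmpl/ja sequence above is a single unsigned range check:
	 * it accepts only 0x80000001 <= %eax <= 0x8000ffff, i.e. CPUs that
	 * actually report extended CPUID leaves.
	 */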

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc 6f

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

6:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl %cr0,%eax
	orl  $X86_CR0_PG,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
 * confuse the debugger if this code is traced.
 * XXX - best to initialize before switching to protected mode.
 */
	pushl $0
	popfl

#ifdef CONFIG_SMP
	cmpb $0, ready
	jnz checkCPUtype
#endif /* CONFIG_SMP */

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	call setup_idt

checkCPUtype:

	movl $-1,X86_CPUID		# -1 for no CPUID initially

/* check if it is 486 or 386. */
/*
 * XXX - this does a lot of unnecessary setup.  Alignment checks don't
 * apply at our cpl of 0 and the stack ought to be aligned already, and
 * we don't need to preserve eflags.
 */
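	/*
	 * The masks used below: EFLAGS.AC is bit 18 (0x40000) and EFLAGS.ID
	 * is bit 21 (0x200000), hence the combined toggle value 0x240000.
	 * A 386 cannot toggle AC, and a CPU without CPUID cannot toggle ID.
	 */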

	movb $3,X86		# at least 386
	pushfl			# push EFLAGS
	popl %eax		# get EFLAGS
	movl %eax,%ecx		# save original EFLAGS
	xorl $0x240000,%eax	# flip AC and ID bits in EFLAGS
	pushl %eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl %eax		# put it in eax
	xorl %ecx,%eax		# change in flags
	pushl %ecx		# restore original EFLAGS
	popfl
	testl $0x40000,%eax	# check if AC bit changed
	je is386

	movb $4,X86		# at least 486
	testl $0x200000,%eax	# check if ID bit changed
	je is486

	/* get vendor info */
	xorl %eax,%eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID	# save CPUID level
	movl %ebx,X86_VENDOR_ID	# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax		# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask mask revision (stepping)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:	movl $0x50022,%ecx	# set AM, WP, NE and MP
	jmp 2f

is386:	movl $2,%ecx		# set MP
2:	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0
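	/*
	 * CR0 bit decoding for the constants above: 0x50022 = AM (bit 18) |
	 * WP (bit 16) | NE (bit 5) | MP (bit 1), while the 0x80000011 mask
	 * preserves PG (bit 31), ET (bit 4) and PE (bit 0).
	 */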

	call check_x87
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * The linker can't handle this by relocation.  Manually set
	 * base address in stack canary segment descriptor.
	 */
	cmpb $0,ready
	jne 1f
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
1:
#endif
	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	cld			# gcc2 wants the direction flag cleared at all times
	pushl $0		# fake return address for unwinder
	movb $1, ready
	jmp *(initial_code)

/*
 * We depend on ET to be correct. This checks for 287/387.
 */
check_x87:
	movb $0,X86_HARD_MATH
	clts
	fninit
	fstsw %ax
	cmpb $0,%al
	je 1f
	movl %cr0,%eax		/* no coprocessor: have to set bits */
	xorl $4,%eax		/* set EM */
	movl %eax,%cr0
	ret
	ALIGN
1:	movb $1,X86_HARD_MATH
	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
	ret

/*
 * setup_idt
 *
 * sets up an IDT with 256 entries pointing to
 * ignore_int, interrupt gates. It doesn't actually load
 * the IDT - that can be done only after paging has been enabled
 * and the kernel moved to PAGE_OFFSET. Interrupts
 * are enabled elsewhere, when we can be relatively
 * sure everything is ok.
 *
 * Warning: %esi is live across this function.
 */
setup_idt:
	lea ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
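	/*
	 * At this point %eax holds the low 32 bits of the gate (selector in
	 * the high word, handler offset 15..0 in the low word) and %edx the
	 * high 32 bits (handler offset 31..16 plus the 0x8E00 type/attribute
	 * bits), matching the 8-byte 32-bit interrupt gate layout stored by
	 * the loop below.
	 */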

	lea idt_table,%edi
	mov $256,%ecx
rp_sidt:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	dec %ecx
	jne rp_sidt

.macro	set_early_handler handler,trapno
	lea \handler,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
	lea idt_table,%edi
	movl %eax,8*\trapno(%edi)
	movl %edx,8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err,trapno=0
	set_early_handler handler=early_illegal_opcode,trapno=6
	set_early_handler handler=early_protection_fault,trapno=13
	set_early_handler handler=early_page_fault,trapno=14

	ret

early_divide_err:
	xor %edx,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_illegal_opcode:
	movl $6,%edx
	pushl $0		/* fake errcode */
	jmp early_fault

early_protection_fault:
	movl $13,%edx
	jmp early_fault

early_page_fault:
	movl $14,%edx
	jmp early_fault

early_fault:
	cld
#ifdef CONFIG_PRINTK
	pusha
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	movl %cr2,%eax
	pushl %eax
	pushl %edx		/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

#include "verify_cpu.S"

	__REFDATA
.align 4
ENTRY(initial_code)
	.long i386_start_kernel

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0
ENTRY(swapper_pg_dir)
	.fill 1024,4,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE

early_recursion_flag:
	.long 0

ready:	.byte 0

int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */
	.ascii "     EDI %p ESI %p EBP %p ESP %p\n"
	.ascii "     EBX %p EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii "     err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are strange 48-bit objects
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size and a 32-bit linear address value:
 */
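/*
 * For example, idt_descr below encodes a limit of IDT_ENTRIES*8-1 = 0x7ff
 * (256 gates of 8 bytes) paired with the linear address of idt_table, and
 * boot_gdt_descr likewise pairs a limit (__BOOT_DS+7) with the physical
 * address of boot_gdt.
 */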

.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */