| #include <linux/bootmem.h> |
| #include <linux/linkage.h> |
| #include <linux/bitops.h> |
| #include <linux/kernel.h> |
| #include <linux/export.h> |
| #include <linux/percpu.h> |
| #include <linux/string.h> |
| #include <linux/ctype.h> |
| #include <linux/delay.h> |
| #include <linux/sched/mm.h> |
| #include <linux/sched/clock.h> |
| #include <linux/sched/task.h> |
| #include <linux/sched/smt.h> |
| #include <linux/init.h> |
| #include <linux/kprobes.h> |
| #include <linux/kgdb.h> |
| #include <linux/mem_encrypt.h> |
| #include <linux/smp.h> |
| #include <linux/cpu.h> |
| #include <linux/io.h> |
| #include <linux/syscore_ops.h> |
| |
| #include <asm/stackprotector.h> |
| #include <linux/utsname.h> |
| |
| #include <asm/alternative.h> |
| #include <asm/perf_event.h> |
| #include <asm/mmu_context.h> |
| #include <asm/archrandom.h> |
| #include <asm/hypervisor.h> |
| #include <asm/processor.h> |
| #include <asm/tlbflush.h> |
| #include <asm/debugreg.h> |
| #include <asm/sections.h> |
| #include <asm/vsyscall.h> |
| #include <linux/topology.h> |
| #include <linux/cpumask.h> |
| #include <asm/pgtable.h> |
| #include <linux/atomic.h> |
| #include <asm/proto.h> |
| #include <asm/setup.h> |
| #include <asm/apic.h> |
| #include <asm/desc.h> |
| #include <asm/fpu/internal.h> |
| #include <asm/mtrr.h> |
| #include <asm/hwcap2.h> |
| #include <linux/numa.h> |
| #include <asm/asm.h> |
| #include <asm/bugs.h> |
| #include <asm/cpu.h> |
| #include <asm/mce.h> |
| #include <asm/msr.h> |
| #include <asm/pat.h> |
| #include <asm/microcode.h> |
| #include <asm/microcode_intel.h> |
| #include <asm/intel-family.h> |
| #include <asm/cpu_device_id.h> |
| |
| #ifdef CONFIG_X86_LOCAL_APIC |
| #include <asm/uv/uv.h> |
| #endif |
| #include <asm/set_memory.h> |
| |
| #include "cpu.h" |
| |
| u32 elf_hwcap2 __read_mostly; |
| |
| /* all of these masks are initialized in setup_cpu_local_masks() */ |
| cpumask_var_t cpu_initialized_mask; |
| cpumask_var_t cpu_callout_mask; |
| cpumask_var_t cpu_callin_mask; |
| |
| /* representing cpus for which sibling maps can be computed */ |
| cpumask_var_t cpu_sibling_setup_mask; |
| |
| /* Number of siblings per CPU package */ |
| int smp_num_siblings = 1; |
| EXPORT_SYMBOL(smp_num_siblings); |
| |
| /* Last level cache ID of each logical CPU */ |
| DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; |
| |
| /* correctly size the local cpu masks */ |
| void __init setup_cpu_local_masks(void) |
| { |
| alloc_bootmem_cpumask_var(&cpu_initialized_mask); |
| alloc_bootmem_cpumask_var(&cpu_callin_mask); |
| alloc_bootmem_cpumask_var(&cpu_callout_mask); |
| alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
| } |
| |
| static void default_init(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_64 |
| cpu_detect_cache_sizes(c); |
| #else |
| /* Not much we can do here... */ |
| /* Check if at least it has cpuid */ |
| if (c->cpuid_level == -1) { |
| /* No cpuid. It must be an ancient CPU */ |
| if (c->x86 == 4) |
| strcpy(c->x86_model_id, "486"); |
| else if (c->x86 == 3) |
| strcpy(c->x86_model_id, "386"); |
| } |
| #endif |
| } |
| |
| static const struct cpu_dev default_cpu = { |
| .c_init = default_init, |
| .c_vendor = "Unknown", |
| .c_x86_vendor = X86_VENDOR_UNKNOWN, |
| }; |
| |
| static const struct cpu_dev *this_cpu = &default_cpu; |
| |
| DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
| #ifdef CONFIG_X86_64 |
| /* |
| * We need valid kernel segments for data and code in long mode too |
| * IRET will check the segment types kkeil 2000/10/28 |
| * Also sysret mandates a special GDT layout |
| * |
| * TLS descriptors are currently at a different place compared to i386. |
| * Hopefully nobody expects them at a fixed place (Wine?) |
| */ |
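| 	/* |
| 	 * Rough decode of the GDT_ENTRY_INIT() flag values below (worth |
| 	 * cross-checking against the SDM descriptor layout): the low byte is |
| 	 * the access byte and the high byte carries the G/D/L/AVL bits, e.g. |
| 	 * 0xa09b = present, DPL 0, execute/read code, accessed, G=1 (4K |
| 	 * granularity), L=1 (64-bit), while 0xc09b differs only in D=1/L=0 |
| 	 * (32-bit) and 0xc093 is the matching read/write data type. |
| 	 */ |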
| [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
| [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
| [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
| [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
| [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
| [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
| #else |
| [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
| [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
| [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
| [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
| /* |
| * Segments used for calling PnP BIOS have byte granularity. |
| 	 * The code and data segments have fixed 64k limits, |
| * the transfer segment sizes are set at run time. |
| */ |
| /* 32-bit code */ |
| [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
| /* 16-bit code */ |
| [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
| /* 16-bit data */ |
| [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
| /* 16-bit data */ |
| [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
| /* 16-bit data */ |
| [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
| /* |
| * The APM segments have byte granularity and their bases |
| * are set at run time. All have 64k limits. |
| */ |
| /* 32-bit code */ |
| [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
| /* 16-bit code */ |
| [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
| /* data */ |
| [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
| |
| [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
| [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
| GDT_STACK_CANARY_INIT |
| #endif |
| } }; |
| EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
| |
| static int __init x86_mpx_setup(char *s) |
| { |
| /* require an exact match without trailing characters */ |
| if (strlen(s)) |
| return 0; |
| |
| /* do not emit a message if the feature is not present */ |
| if (!boot_cpu_has(X86_FEATURE_MPX)) |
| return 1; |
| |
| setup_clear_cpu_cap(X86_FEATURE_MPX); |
| pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n"); |
| return 1; |
| } |
| __setup("nompx", x86_mpx_setup); |
| |
| #ifdef CONFIG_X86_64 |
| static int __init x86_nopcid_setup(char *s) |
| { |
| /* nopcid doesn't accept parameters */ |
| if (s) |
| return -EINVAL; |
| |
| /* do not emit a message if the feature is not present */ |
| if (!boot_cpu_has(X86_FEATURE_PCID)) |
| return 0; |
| |
| setup_clear_cpu_cap(X86_FEATURE_PCID); |
| pr_info("nopcid: PCID feature disabled\n"); |
| return 0; |
| } |
| early_param("nopcid", x86_nopcid_setup); |
| #endif |
| |
| static int __init x86_noinvpcid_setup(char *s) |
| { |
| /* noinvpcid doesn't accept parameters */ |
| if (s) |
| return -EINVAL; |
| |
| /* do not emit a message if the feature is not present */ |
| if (!boot_cpu_has(X86_FEATURE_INVPCID)) |
| return 0; |
| |
| setup_clear_cpu_cap(X86_FEATURE_INVPCID); |
| pr_info("noinvpcid: INVPCID feature disabled\n"); |
| return 0; |
| } |
| early_param("noinvpcid", x86_noinvpcid_setup); |
| |
| #ifdef CONFIG_X86_32 |
| static int cachesize_override = -1; |
| static int disable_x86_serial_nr = 1; |
| |
| static int __init cachesize_setup(char *str) |
| { |
| get_option(&str, &cachesize_override); |
| return 1; |
| } |
| __setup("cachesize=", cachesize_setup); |
| |
| static int __init x86_sep_setup(char *s) |
| { |
| setup_clear_cpu_cap(X86_FEATURE_SEP); |
| return 1; |
| } |
| __setup("nosep", x86_sep_setup); |
| |
| /* Standard macro to see if a specific flag is changeable */ |
| static inline int flag_is_changeable_p(u32 flag) |
| { |
| u32 f1, f2; |
| |
| /* |
| * Cyrix and IDT cpus allow disabling of CPUID |
| * so the code below may return different results |
| * when it is executed before and after enabling |
| * the CPUID. Add "volatile" to not allow gcc to |
| * optimize the subsequent calls to this function. |
| */ |
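| 	/* |
| 	 * The sequence below saves the original EFLAGS, reads EFLAGS into f1, |
| 	 * copies it to f2, toggles the requested flag bit, writes it back via |
| 	 * POPF, re-reads EFLAGS into f1 and finally restores the original |
| 	 * EFLAGS. The flag is changeable iff the toggled bit stuck, i.e. |
| 	 * (f1 ^ f2) has it set. |
| 	 */ |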
| asm volatile ("pushfl \n\t" |
| "pushfl \n\t" |
| "popl %0 \n\t" |
| "movl %0, %1 \n\t" |
| "xorl %2, %0 \n\t" |
| "pushl %0 \n\t" |
| "popfl \n\t" |
| "pushfl \n\t" |
| "popl %0 \n\t" |
| "popfl \n\t" |
| |
| : "=&r" (f1), "=&r" (f2) |
| : "ir" (flag)); |
| |
| return ((f1^f2) & flag) != 0; |
| } |
| |
| /* Probe for the CPUID instruction */ |
| int have_cpuid_p(void) |
| { |
| return flag_is_changeable_p(X86_EFLAGS_ID); |
| } |
| |
| static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
| { |
| unsigned long lo, hi; |
| |
| if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) |
| return; |
| |
| /* Disable processor serial number: */ |
| |
| rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
| lo |= 0x200000; |
| wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
| |
| pr_notice("CPU serial number disabled.\n"); |
| clear_cpu_cap(c, X86_FEATURE_PN); |
| |
| /* Disabling the serial number may affect the cpuid level */ |
| c->cpuid_level = cpuid_eax(0); |
| } |
| |
| static int __init x86_serial_nr_setup(char *s) |
| { |
| disable_x86_serial_nr = 0; |
| return 1; |
| } |
| __setup("serialnumber", x86_serial_nr_setup); |
| #else |
| static inline int flag_is_changeable_p(u32 flag) |
| { |
| return 1; |
| } |
| static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
| { |
| } |
| #endif |
| |
| static __init int setup_disable_smep(char *arg) |
| { |
| setup_clear_cpu_cap(X86_FEATURE_SMEP); |
| /* Check for things that depend on SMEP being enabled: */ |
| check_mpx_erratum(&boot_cpu_data); |
| return 1; |
| } |
| __setup("nosmep", setup_disable_smep); |
| |
| static __always_inline void setup_smep(struct cpuinfo_x86 *c) |
| { |
| if (cpu_has(c, X86_FEATURE_SMEP)) |
| cr4_set_bits(X86_CR4_SMEP); |
| } |
| |
| static __init int setup_disable_smap(char *arg) |
| { |
| setup_clear_cpu_cap(X86_FEATURE_SMAP); |
| return 1; |
| } |
| __setup("nosmap", setup_disable_smap); |
| |
| static __always_inline void setup_smap(struct cpuinfo_x86 *c) |
| { |
| unsigned long eflags = native_save_fl(); |
| |
| /* This should have been cleared long ago */ |
| BUG_ON(eflags & X86_EFLAGS_AC); |
| |
| if (cpu_has(c, X86_FEATURE_SMAP)) { |
| #ifdef CONFIG_X86_SMAP |
| cr4_set_bits(X86_CR4_SMAP); |
| #else |
| cr4_clear_bits(X86_CR4_SMAP); |
| #endif |
| } |
| } |
| |
| /* |
| * Protection Keys are not available in 32-bit mode. |
| */ |
| static bool pku_disabled; |
| |
| static __always_inline void setup_pku(struct cpuinfo_x86 *c) |
| { |
| /* check the boot processor, plus compile options for PKU: */ |
| if (!cpu_feature_enabled(X86_FEATURE_PKU)) |
| return; |
| /* checks the actual processor's cpuid bits: */ |
| if (!cpu_has(c, X86_FEATURE_PKU)) |
| return; |
| if (pku_disabled) |
| return; |
| |
| cr4_set_bits(X86_CR4_PKE); |
| /* |
| 	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE |
| * cpuid bit to be set. We need to ensure that we |
| * update that bit in this CPU's "cpu_info". |
| */ |
| set_cpu_cap(c, X86_FEATURE_OSPKE); |
| } |
| |
| #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS |
| static __init int setup_disable_pku(char *arg) |
| { |
| /* |
| * Do not clear the X86_FEATURE_PKU bit. All of the |
| * runtime checks are against OSPKE so clearing the |
| * bit does nothing. |
| * |
| * This way, we will see "pku" in cpuinfo, but not |
| * "ospke", which is exactly what we want. It shows |
| * that the CPU has PKU, but the OS has not enabled it. |
| * This happens to be exactly how a system would look |
| * if we disabled the config option. |
| */ |
| pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n"); |
| pku_disabled = true; |
| return 1; |
| } |
| __setup("nopku", setup_disable_pku); |
| #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ |
| |
| /* |
| * Some CPU features depend on higher CPUID levels, which may not always |
| * be available due to CPUID level capping or broken virtualization |
| * software. Add those features to this table to auto-disable them. |
| */ |
| struct cpuid_dependent_feature { |
| u32 feature; |
| u32 level; |
| }; |
| |
| static const struct cpuid_dependent_feature |
| cpuid_dependent_features[] = { |
| { X86_FEATURE_MWAIT, 0x00000005 }, |
| { X86_FEATURE_DCA, 0x00000009 }, |
| { X86_FEATURE_XSAVE, 0x0000000d }, |
| { 0, 0 } |
| }; |
| |
| static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) |
| { |
| const struct cpuid_dependent_feature *df; |
| |
| for (df = cpuid_dependent_features; df->feature; df++) { |
| |
| if (!cpu_has(c, df->feature)) |
| continue; |
| /* |
| * Note: cpuid_level is set to -1 if unavailable, but |
| 		 * extended_cpuid_level is set to 0 if unavailable |
| * and the legitimate extended levels are all negative |
| * when signed; hence the weird messing around with |
| * signs here... |
| */ |
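| 		/* |
| 		 * Worked example: X86_FEATURE_XSAVE requires level 0xd, which |
| 		 * is positive as an s32, so it is compared (signed) against |
| 		 * cpuid_level. A hypothetical extended-level entry such as |
| 		 * 0x80000008 would be negative as an s32, so the unsigned |
| 		 * compare against extended_cpuid_level is used instead (that |
| 		 * level is 0 when no extended leaves exist, which correctly |
| 		 * disables the feature). |
| 		 */ |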
| if (!((s32)df->level < 0 ? |
| (u32)df->level > (u32)c->extended_cpuid_level : |
| (s32)df->level > (s32)c->cpuid_level)) |
| continue; |
| |
| clear_cpu_cap(c, df->feature); |
| if (!warn) |
| continue; |
| |
| pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", |
| x86_cap_flag(df->feature), df->level); |
| } |
| } |
| |
| /* |
| * Naming convention should be: <Name> [(<Codename>)] |
|  * This table is only used if init_<vendor>() below doesn't set the name; |
|  * in particular, if CPUID levels 0x80000002..4 are supported, this |
|  * table isn't used. |
| */ |
| |
| /* Look up CPU names by table lookup. */ |
| static const char *table_lookup_model(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_32 |
| const struct legacy_cpu_model_info *info; |
| |
| if (c->x86_model >= 16) |
| return NULL; /* Range check */ |
| |
| if (!this_cpu) |
| return NULL; |
| |
| info = this_cpu->legacy_models; |
| |
| while (info->family) { |
| if (info->family == c->x86) |
| return info->model_names[c->x86_model]; |
| info++; |
| } |
| #endif |
| return NULL; /* Not found */ |
| } |
| |
| __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; |
| __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; |
| |
| void load_percpu_segment(int cpu) |
| { |
| #ifdef CONFIG_X86_32 |
| loadsegment(fs, __KERNEL_PERCPU); |
| #else |
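| 	/* |
| 	 * Note the ordering: loading %gs with the null selector can zero |
| 	 * GS_BASE (Intel behaviour), so the wrmsrl() restoring the per-cpu |
| 	 * base has to come after the segment load, not before it. |
| 	 */ |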
| __loadsegment_simple(gs, 0); |
| wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); |
| #endif |
| load_stack_canary_segment(); |
| } |
| |
| #ifdef CONFIG_X86_32 |
| /* The 32-bit entry code needs to find cpu_entry_area. */ |
| DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); |
| #endif |
| |
| #ifdef CONFIG_X86_64 |
| /* |
| * Special IST stacks which the CPU switches to when it calls |
| * an IST-marked descriptor entry. Up to 7 stacks (hardware |
| * limit), all of them are 4K, except the debug stack which |
| * is 8K. |
| */ |
| static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { |
| [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, |
| [DEBUG_STACK - 1] = DEBUG_STKSZ |
| }; |
| #endif |
| |
| /* Load the original GDT from the per-cpu structure */ |
| void load_direct_gdt(int cpu) |
| { |
| struct desc_ptr gdt_descr; |
| |
| gdt_descr.address = (long)get_cpu_gdt_rw(cpu); |
| gdt_descr.size = GDT_SIZE - 1; |
| load_gdt(&gdt_descr); |
| } |
| EXPORT_SYMBOL_GPL(load_direct_gdt); |
| |
| /* Load a fixmap remapping of the per-cpu GDT */ |
| void load_fixmap_gdt(int cpu) |
| { |
| struct desc_ptr gdt_descr; |
| |
| gdt_descr.address = (long)get_cpu_gdt_ro(cpu); |
| gdt_descr.size = GDT_SIZE - 1; |
| load_gdt(&gdt_descr); |
| } |
| EXPORT_SYMBOL_GPL(load_fixmap_gdt); |
| |
| /* |
| * Current gdt points %fs at the "master" per-cpu area: after this, |
| * it's on the real one. |
| */ |
| void switch_to_new_gdt(int cpu) |
| { |
| /* Load the original GDT */ |
| load_direct_gdt(cpu); |
| /* Reload the per-cpu base */ |
| load_percpu_segment(cpu); |
| } |
| |
| static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
| |
| static void get_model_name(struct cpuinfo_x86 *c) |
| { |
| unsigned int *v; |
| char *p, *q, *s; |
| |
| if (c->extended_cpuid_level < 0x80000004) |
| return; |
| |
| v = (unsigned int *)c->x86_model_id; |
| cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
| cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); |
| cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); |
| c->x86_model_id[48] = 0; |
| |
| /* Trim whitespace */ |
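| 	/* |
| 	 * e.g. a (hypothetical) "  Genuine   CPU  " becomes "Genuine   CPU": |
| 	 * leading blanks are skipped, the string is shifted left in place, and |
| 	 * 's' tracks the last non-space character so trailing blanks are cut |
| 	 * off. Inner whitespace is left alone. |
| 	 */ |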
| p = q = s = &c->x86_model_id[0]; |
| |
| while (*p == ' ') |
| p++; |
| |
| while (*p) { |
| /* Note the last non-whitespace index */ |
| if (!isspace(*p)) |
| s = q; |
| |
| *q++ = *p++; |
| } |
| |
| *(s + 1) = '\0'; |
| } |
| |
| void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) |
| { |
| unsigned int n, dummy, ebx, ecx, edx, l2size; |
| |
| n = c->extended_cpuid_level; |
| |
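| 	/* |
| 	 * Per the AMD-style extended leaves used below: CPUID 0x80000005 |
| 	 * reports the L1 caches (ECX[31:24] data, EDX[31:24] instruction, both |
| 	 * in KB) and CPUID 0x80000006 reports the L2 cache size in ECX[31:16] |
| 	 * (KB) plus L2 TLB entry counts in EBX. |
| 	 */ |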
| if (n >= 0x80000005) { |
| cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
| c->x86_cache_size = (ecx>>24) + (edx>>24); |
| #ifdef CONFIG_X86_64 |
| /* On K8 L1 TLB is inclusive, so don't count it */ |
| c->x86_tlbsize = 0; |
| #endif |
| } |
| |
| 	if (n < 0x80000006)	/* Some chips just have a large L1. */ |
| return; |
| |
| cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
| l2size = ecx >> 16; |
| |
| #ifdef CONFIG_X86_64 |
| c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); |
| #else |
| /* do processor-specific cache resizing */ |
| if (this_cpu->legacy_cache_size) |
| l2size = this_cpu->legacy_cache_size(c, l2size); |
| |
| /* Allow user to override all this if necessary. */ |
| if (cachesize_override != -1) |
| l2size = cachesize_override; |
| |
| if (l2size == 0) |
| return; /* Again, no L2 cache is possible */ |
| #endif |
| |
| c->x86_cache_size = l2size; |
| } |
| |
| u16 __read_mostly tlb_lli_4k[NR_INFO]; |
| u16 __read_mostly tlb_lli_2m[NR_INFO]; |
| u16 __read_mostly tlb_lli_4m[NR_INFO]; |
| u16 __read_mostly tlb_lld_4k[NR_INFO]; |
| u16 __read_mostly tlb_lld_2m[NR_INFO]; |
| u16 __read_mostly tlb_lld_4m[NR_INFO]; |
| u16 __read_mostly tlb_lld_1g[NR_INFO]; |
| |
| static void cpu_detect_tlb(struct cpuinfo_x86 *c) |
| { |
| if (this_cpu->c_detect_tlb) |
| this_cpu->c_detect_tlb(c); |
| |
| pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n", |
| tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], |
| tlb_lli_4m[ENTRIES]); |
| |
| pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", |
| tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], |
| tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); |
| } |
| |
| int detect_ht_early(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_SMP |
| u32 eax, ebx, ecx, edx; |
| |
| if (!cpu_has(c, X86_FEATURE_HT)) |
| return -1; |
| |
| if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
| return -1; |
| |
| if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
| return -1; |
| |
| cpuid(1, &eax, &ebx, &ecx, &edx); |
| |
| smp_num_siblings = (ebx & 0xff0000) >> 16; |
| if (smp_num_siblings == 1) |
| pr_info_once("CPU0: Hyper-Threading is disabled\n"); |
| #endif |
| return 0; |
| } |
| |
| void detect_ht(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_SMP |
| int index_msb, core_bits; |
| |
| if (detect_ht_early(c) < 0) |
| return; |
| |
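| 	/* |
| 	 * Hypothetical example: CPUID reports smp_num_siblings == 8 logical |
| 	 * CPUs per package and x86_max_cores == 4. index_msb is then 3, so |
| 	 * phys_proc_id is the APIC ID shifted right by 3. Threads per core is |
| 	 * 8 / 4 == 2 and core_bits is 2, so cpu_core_id ends up being |
| 	 * (APIC ID >> 1) & 3, i.e. the two bits just above the thread bit. |
| 	 */ |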
| index_msb = get_count_order(smp_num_siblings); |
| c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); |
| |
| smp_num_siblings = smp_num_siblings / c->x86_max_cores; |
| |
| index_msb = get_count_order(smp_num_siblings); |
| |
| core_bits = get_count_order(c->x86_max_cores); |
| |
| c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & |
| ((1 << core_bits) - 1); |
| #endif |
| } |
| |
| static void get_cpu_vendor(struct cpuinfo_x86 *c) |
| { |
| char *v = c->x86_vendor_id; |
| int i; |
| |
| for (i = 0; i < X86_VENDOR_NUM; i++) { |
| if (!cpu_devs[i]) |
| break; |
| |
| if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
| (cpu_devs[i]->c_ident[1] && |
| !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
| |
| this_cpu = cpu_devs[i]; |
| c->x86_vendor = this_cpu->c_x86_vendor; |
| return; |
| } |
| } |
| |
| pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \ |
| "CPU: Your system may be unstable.\n", v); |
| |
| c->x86_vendor = X86_VENDOR_UNKNOWN; |
| this_cpu = &default_cpu; |
| } |
| |
| void cpu_detect(struct cpuinfo_x86 *c) |
| { |
| /* Get vendor name */ |
| cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
| (unsigned int *)&c->x86_vendor_id[0], |
| (unsigned int *)&c->x86_vendor_id[8], |
| (unsigned int *)&c->x86_vendor_id[4]); |
| |
| c->x86 = 4; |
| /* Intel-defined flags: level 0x00000001 */ |
| if (c->cpuid_level >= 0x00000001) { |
| u32 junk, tfms, cap0, misc; |
| |
| cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
| c->x86 = x86_family(tfms); |
| c->x86_model = x86_model(tfms); |
| c->x86_stepping = x86_stepping(tfms); |
| |
| if (cap0 & (1<<19)) { |
| c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
| c->x86_cache_alignment = c->x86_clflush_size; |
| } |
| } |
| } |
| |
| static void apply_forced_caps(struct cpuinfo_x86 *c) |
| { |
| int i; |
| |
| for (i = 0; i < NCAPINTS + NBUGINTS; i++) { |
| c->x86_capability[i] &= ~cpu_caps_cleared[i]; |
| c->x86_capability[i] |= cpu_caps_set[i]; |
| } |
| } |
| |
| static void init_speculation_control(struct cpuinfo_x86 *c) |
| { |
| /* |
| * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, |
| * and they also have a different bit for STIBP support. Also, |
| * a hypervisor might have set the individual AMD bits even on |
| * Intel CPUs, for finer-grained selection of what's available. |
| */ |
| if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { |
| set_cpu_cap(c, X86_FEATURE_IBRS); |
| set_cpu_cap(c, X86_FEATURE_IBPB); |
| set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
| } |
| |
| if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
| set_cpu_cap(c, X86_FEATURE_STIBP); |
| |
| if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || |
| cpu_has(c, X86_FEATURE_VIRT_SSBD)) |
| set_cpu_cap(c, X86_FEATURE_SSBD); |
| |
| if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { |
| set_cpu_cap(c, X86_FEATURE_IBRS); |
| set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
| } |
| |
| if (cpu_has(c, X86_FEATURE_AMD_IBPB)) |
| set_cpu_cap(c, X86_FEATURE_IBPB); |
| |
| if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { |
| set_cpu_cap(c, X86_FEATURE_STIBP); |
| set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
| } |
| |
| if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { |
| set_cpu_cap(c, X86_FEATURE_SSBD); |
| set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); |
| clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); |
| } |
| } |
| |
| static void init_cqm(struct cpuinfo_x86 *c) |
| { |
| if (!cpu_has(c, X86_FEATURE_CQM_LLC)) { |
| c->x86_cache_max_rmid = -1; |
| c->x86_cache_occ_scale = -1; |
| return; |
| } |
| |
| /* will be overridden if occupancy monitoring exists */ |
| c->x86_cache_max_rmid = cpuid_ebx(0xf); |
| |
| if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) || |
| cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) || |
| cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) { |
| u32 eax, ebx, ecx, edx; |
| |
| /* QoS sub-leaf, EAX=0Fh, ECX=1 */ |
| cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_cache_max_rmid = ecx; |
| c->x86_cache_occ_scale = ebx; |
| } |
| } |
| |
| void get_cpu_cap(struct cpuinfo_x86 *c) |
| { |
| u32 eax, ebx, ecx, edx; |
| |
| /* Intel-defined flags: level 0x00000001 */ |
| if (c->cpuid_level >= 0x00000001) { |
| cpuid(0x00000001, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_capability[CPUID_1_ECX] = ecx; |
| c->x86_capability[CPUID_1_EDX] = edx; |
| } |
| |
| /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ |
| if (c->cpuid_level >= 0x00000006) |
| c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); |
| |
| /* Additional Intel-defined flags: level 0x00000007 */ |
| if (c->cpuid_level >= 0x00000007) { |
| cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); |
| c->x86_capability[CPUID_7_0_EBX] = ebx; |
| c->x86_capability[CPUID_7_ECX] = ecx; |
| c->x86_capability[CPUID_7_EDX] = edx; |
| } |
| |
| /* Extended state features: level 0x0000000d */ |
| if (c->cpuid_level >= 0x0000000d) { |
| cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_capability[CPUID_D_1_EAX] = eax; |
| } |
| |
| /* AMD-defined flags: level 0x80000001 */ |
| eax = cpuid_eax(0x80000000); |
| c->extended_cpuid_level = eax; |
| |
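| 	/* |
| 	 * The reported maximum extended leaf is only meaningful if it lies in |
| 	 * the 0x8000xxxx range; CPUs without extended leaves return something |
| 	 * else here (typically the highest basic leaf's data). |
| 	 */ |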
| if ((eax & 0xffff0000) == 0x80000000) { |
| if (eax >= 0x80000001) { |
| cpuid(0x80000001, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_capability[CPUID_8000_0001_ECX] = ecx; |
| c->x86_capability[CPUID_8000_0001_EDX] = edx; |
| } |
| } |
| |
| if (c->extended_cpuid_level >= 0x80000007) { |
| cpuid(0x80000007, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_capability[CPUID_8000_0007_EBX] = ebx; |
| c->x86_power = edx; |
| } |
| |
| if (c->extended_cpuid_level >= 0x80000008) { |
| cpuid(0x80000008, &eax, &ebx, &ecx, &edx); |
| |
| c->x86_virt_bits = (eax >> 8) & 0xff; |
| c->x86_phys_bits = eax & 0xff; |
| c->x86_capability[CPUID_8000_0008_EBX] = ebx; |
| } |
| #ifdef CONFIG_X86_32 |
| else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) |
| c->x86_phys_bits = 36; |
| #endif |
| c->x86_cache_bits = c->x86_phys_bits; |
| |
| if (c->extended_cpuid_level >= 0x8000000a) |
| c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); |
| |
| init_scattered_cpuid_features(c); |
| init_speculation_control(c); |
| init_cqm(c); |
| |
| /* |
| * Clear/Set all flags overridden by options, after probe. |
| * This needs to happen each time we re-probe, which may happen |
| * several times during CPU initialization. |
| */ |
| apply_forced_caps(c); |
| } |
| |
| static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_32 |
| int i; |
| |
| /* |
| 	 * First of all, decide if this is a 486 or higher: |
| 	 * it's a 486 if we can modify the AC flag. |
| */ |
| if (flag_is_changeable_p(X86_EFLAGS_AC)) |
| c->x86 = 4; |
| else |
| c->x86 = 3; |
| |
| for (i = 0; i < X86_VENDOR_NUM; i++) |
| if (cpu_devs[i] && cpu_devs[i]->c_identify) { |
| c->x86_vendor_id[0] = 0; |
| cpu_devs[i]->c_identify(c); |
| if (c->x86_vendor_id[0]) { |
| get_cpu_vendor(c); |
| break; |
| } |
| } |
| #endif |
| } |
| |
| #define NO_SPECULATION BIT(0) |
| #define NO_MELTDOWN BIT(1) |
| #define NO_SSB BIT(2) |
| #define NO_L1TF BIT(3) |
| #define NO_MDS BIT(4) |
| #define MSBDS_ONLY BIT(5) |
| #define NO_SWAPGS BIT(6) |
| #define NO_ITLB_MULTIHIT BIT(7) |
| #define NO_MMIO BIT(8) |
| #define NO_EIBRS_PBRSB BIT(9) |
| |
| #define VULNWL(_vendor, _family, _model, _whitelist) \ |
| { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } |
| |
| #define VULNWL_INTEL(model, whitelist) \ |
| VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) |
| |
| #define VULNWL_AMD(family, whitelist) \ |
| VULNWL(AMD, family, X86_MODEL_ANY, whitelist) |
| |
| static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { |
| VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION), |
| VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), |
| VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), |
| VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), |
| |
| /* Intel Family 6 */ |
| VULNWL_INTEL(TIGERLAKE, NO_MMIO), |
| VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), |
| VULNWL_INTEL(ALDERLAKE, NO_MMIO), |
| VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), |
| |
| VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), |
| |
| VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| |
| VULNWL_INTEL(CORE_YONAH, NO_SSB), |
| |
| VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), |
| |
| VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), |
| |
| /* |
| * Technically, swapgs isn't serializing on AMD (despite it previously |
| * being documented as such in the APM). But according to AMD, %gs is |
| * updated non-speculatively, and the issuing of %gs-relative memory |
| * operands will be blocked until the %gs update completes, which is |
| * good enough for our purposes. |
| */ |
| |
| VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB), |
| VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB), |
| VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), |
| |
| /* AMD Family 0xf - 0x12 */ |
| VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| |
| /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ |
| VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), |
| {} |
| }; |
| |
| #define VULNBL(vendor, family, model, blacklist) \ |
| X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) |
| |
| #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ |
| X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ |
| INTEL_FAM6_##model, steppings, \ |
| X86_FEATURE_ANY, issues) |
| |
| #define VULNBL_AMD(family, blacklist) \ |
| VULNBL(AMD, family, X86_MODEL_ANY, blacklist) |
| |
| #define SRBDS BIT(0) |
| /* CPU is affected by X86_BUG_MMIO_STALE_DATA */ |
| #define MMIO BIT(1) |
| /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ |
| #define MMIO_SBDS BIT(2) |
| /* CPU is affected by RETbleed, speculating where you would not expect it */ |
| #define RETBLEED BIT(3) |
| /* CPU is affected by SMT (cross-thread) return predictions */ |
| #define SMT_RSB BIT(4) |
| /* CPU is affected by SRSO */ |
| #define SRSO BIT(5) |
| /* CPU is affected by GDS */ |
| #define GDS BIT(6) |
| |
| static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { |
| VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), |
| VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPING_ANY, MMIO), |
| VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), |
| VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS), |
| VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), |
| VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), |
| VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(CANNONLAKE_MOBILE,X86_STEPPING_ANY, RETBLEED), |
| VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO | GDS), |
| VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), |
| VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), |
| VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), |
| VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), |
| VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), |
| VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), |
| VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO), |
| VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), |
| |
| VULNBL_AMD(0x15, RETBLEED), |
| VULNBL_AMD(0x16, RETBLEED), |
| VULNBL_AMD(0x17, RETBLEED), |
| {} |
| }; |
| |
| static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) |
| { |
| const struct x86_cpu_id *m = x86_match_cpu(table); |
| |
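| 	/* |
| 	 * driver_data carries the NO_* whitelist bits (or the blacklist bits) |
| 	 * that the VULNWL and VULNBL macros place in the last x86_cpu_id field. |
| 	 */ |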
| return m && !!(m->driver_data & which); |
| } |
| |
| u64 x86_read_arch_cap_msr(void) |
| { |
| u64 ia32_cap = 0; |
| |
| if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) |
| rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
| |
| return ia32_cap; |
| } |
| |
| static bool arch_cap_mmio_immune(u64 ia32_cap) |
| { |
| return (ia32_cap & ARCH_CAP_FBSDP_NO && |
| ia32_cap & ARCH_CAP_PSDP_NO && |
| ia32_cap & ARCH_CAP_SBDR_SSDP_NO); |
| } |
| |
| static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) |
| { |
| u64 ia32_cap = x86_read_arch_cap_msr(); |
| |
| /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */ |
| if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) && |
| !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO)) |
| setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT); |
| |
| if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION)) |
| return; |
| |
| setup_force_cpu_bug(X86_BUG_SPECTRE_V1); |
| setup_force_cpu_bug(X86_BUG_SPECTRE_V2); |
| |
| if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) && |
| !(ia32_cap & ARCH_CAP_SSB_NO) && |
| !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) |
| setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); |
| |
| if (ia32_cap & ARCH_CAP_IBRS_ALL) |
| setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); |
| |
| if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && |
| !(ia32_cap & ARCH_CAP_MDS_NO)) { |
| setup_force_cpu_bug(X86_BUG_MDS); |
| if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY)) |
| setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); |
| } |
| |
| if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS)) |
| setup_force_cpu_bug(X86_BUG_SWAPGS); |
| |
| /* |
| * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when: |
| * - TSX is supported or |
| * - TSX_CTRL is present |
| * |
| 	 * The TSX_CTRL check is needed for cases when TSX could be disabled |
| 	 * before the kernel boots, e.g. after kexec. |
| 	 * The TSX_CTRL check alone is not sufficient when the microcode update |
| 	 * is not present or when running as a guest that doesn't get TSX_CTRL. |
| */ |
| if (!(ia32_cap & ARCH_CAP_TAA_NO) && |
| (cpu_has(c, X86_FEATURE_RTM) || |
| (ia32_cap & ARCH_CAP_TSX_CTRL_MSR))) |
| setup_force_cpu_bug(X86_BUG_TAA); |
| |
| /* |
| * SRBDS affects CPUs which support RDRAND or RDSEED and are listed |
| * in the vulnerability blacklist. |
| * |
| * Some of the implications and mitigation of Shared Buffers Data |
| * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as |
| * SRBDS. |
| */ |
| if ((cpu_has(c, X86_FEATURE_RDRAND) || |
| cpu_has(c, X86_FEATURE_RDSEED)) && |
| cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) |
| setup_force_cpu_bug(X86_BUG_SRBDS); |
| |
| /* |
| * Processor MMIO Stale Data bug enumeration |
| * |
| 	 * The affected CPU list is generally enough to enumerate the |
| 	 * vulnerability, but for the virtualization case also check the |
| 	 * ARCH_CAP MSR bits; the VMM may not want the guest to enumerate the bug. |
| * |
| * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, |
| * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. |
| */ |
| if (!arch_cap_mmio_immune(ia32_cap)) { |
| if (cpu_matches(cpu_vuln_blacklist, MMIO)) |
| setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); |
| else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) |
| setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); |
| } |
| |
| if (!cpu_has(c, X86_FEATURE_BTC_NO)) { |
| if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) |
| setup_force_cpu_bug(X86_BUG_RETBLEED); |
| } |
| |
| if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) && |
| !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && |
| !(ia32_cap & ARCH_CAP_PBRSB_NO)) |
| setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); |
| |
| /* |
| * Check if CPU is vulnerable to GDS. If running in a virtual machine on |
| * an affected processor, the VMM may have disabled the use of GATHER by |
| * disabling AVX2. The only way to do this in HW is to clear XCR0[2], |
| * which means that AVX will be disabled. |
| */ |
| if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && |
| boot_cpu_has(X86_FEATURE_AVX)) |
| setup_force_cpu_bug(X86_BUG_GDS); |
| |
| if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) |
| return; |
| |
| /* Rogue Data Cache Load? No! */ |
| if (ia32_cap & ARCH_CAP_RDCL_NO) |
| return; |
| |
| setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); |
| |
| if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) |
| return; |
| |
| setup_force_cpu_bug(X86_BUG_L1TF); |
| } |
| |
| /* |
| * Do minimum CPU detection early. |
| * Fields really needed: vendor, cpuid_level, family, model, mask, |
| * cache alignment. |
| * The others are not touched to avoid unwanted side effects. |
| * |
| * WARNING: this function is only called on the BP. Don't add code here |
| * that is supposed to run on all CPUs. |
| */ |
| static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_64 |
| c->x86_clflush_size = 64; |
| c->x86_phys_bits = 36; |
| c->x86_virt_bits = 48; |
| #else |
| c->x86_clflush_size = 32; |
| c->x86_phys_bits = 32; |
| c->x86_virt_bits = 32; |
| #endif |
| c->x86_cache_alignment = c->x86_clflush_size; |
| |
| memset(&c->x86_capability, 0, sizeof c->x86_capability); |
| c->extended_cpuid_level = 0; |
| |
| if (!have_cpuid_p()) |
| identify_cpu_without_cpuid(c); |
| |
| 	/* Cyrix could have CPUID enabled via c_identify() */ |
| if (have_cpuid_p()) { |
| cpu_detect(c); |
| get_cpu_vendor(c); |
| get_cpu_cap(c); |
| setup_force_cpu_cap(X86_FEATURE_CPUID); |
| |
| if (this_cpu->c_early_init) |
| this_cpu->c_early_init(c); |
| |
| c->cpu_index = 0; |
| filter_cpuid_features(c, false); |
| |
| if (this_cpu->c_bsp_init) |
| this_cpu->c_bsp_init(c); |
| } else { |
| setup_clear_cpu_cap(X86_FEATURE_CPUID); |
| } |
| |
| setup_force_cpu_cap(X86_FEATURE_ALWAYS); |
| |
| cpu_set_bug_bits(c); |
| |
| #ifdef CONFIG_X86_32 |
| /* |
| * Regardless of whether PCID is enumerated, the SDM says |
| * that it can't be enabled in 32-bit mode. |
| */ |
| setup_clear_cpu_cap(X86_FEATURE_PCID); |
| #endif |
| } |
| |
| void __init early_cpu_init(void) |
| { |
| const struct cpu_dev *const *cdev; |
| int count = 0; |
| |
| #ifdef CONFIG_PROCESSOR_SELECT |
| pr_info("KERNEL supported cpus:\n"); |
| #endif |
| |
| for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
| const struct cpu_dev *cpudev = *cdev; |
| |
| if (count >= X86_VENDOR_NUM) |
| break; |
| cpu_devs[count] = cpudev; |
| count++; |
| |
| #ifdef CONFIG_PROCESSOR_SELECT |
| { |
| unsigned int j; |
| |
| for (j = 0; j < 2; j++) { |
| if (!cpudev->c_ident[j]) |
| continue; |
| pr_info(" %s %s\n", cpudev->c_vendor, |
| cpudev->c_ident[j]); |
| } |
| } |
| #endif |
| } |
| early_identify_cpu(&boot_cpu_data); |
| } |
| |
| /* |
| * The NOPL instruction is supposed to exist on all CPUs of family >= 6; |
| * unfortunately, that's not true in practice because of early VIA |
| * chips and (more importantly) broken virtualizers that are not easy |
| * to detect. In the latter case it doesn't even *fail* reliably, so |
| * probing for it doesn't even work. Disable it completely on 32-bit |
| * unless we can find a reliable way to detect all the broken cases. |
| * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). |
| */ |
| static void detect_nopl(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_32 |
| clear_cpu_cap(c, X86_FEATURE_NOPL); |
| #else |
| set_cpu_cap(c, X86_FEATURE_NOPL); |
| #endif |
| } |
| |
| static void detect_null_seg_behavior(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_X86_64 |
| /* |
| * Empirically, writing zero to a segment selector on AMD does |
| * not clear the base, whereas writing zero to a segment |
| * selector on Intel does clear the base. Intel's behavior |
| * allows slightly faster context switches in the common case |
| * where GS is unused by the prev and next threads. |
| * |
| * Since neither vendor documents this anywhere that I can see, |
| * detect it directly instead of hardcoding the choice by |
| * vendor. |
| * |
| * I've designated AMD's behavior as the "bug" because it's |
| * counterintuitive and less friendly. |
| */ |
| |
| unsigned long old_base, tmp; |
| rdmsrl(MSR_FS_BASE, old_base); |
| wrmsrl(MSR_FS_BASE, 1); |
| loadsegment(fs, 0); |
| rdmsrl(MSR_FS_BASE, tmp); |
| if (tmp != 0) |
| set_cpu_bug(c, X86_BUG_NULL_SEG); |
| wrmsrl(MSR_FS_BASE, old_base); |
| #endif |
| } |
| |
| static void generic_identify(struct cpuinfo_x86 *c) |
| { |
| c->extended_cpuid_level = 0; |
| |
| if (!have_cpuid_p()) |
| identify_cpu_without_cpuid(c); |
| |
| 	/* Cyrix could have CPUID enabled via c_identify() */ |
| if (!have_cpuid_p()) |
| return; |
| |
| cpu_detect(c); |
| |
| get_cpu_vendor(c); |
| |
| get_cpu_cap(c); |
| |
| if (c->cpuid_level >= 0x00000001) { |
| c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; |
| #ifdef CONFIG_X86_32 |
| # ifdef CONFIG_SMP |
| c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
| # else |
| c->apicid = c->initial_apicid; |
| # endif |
| #endif |
| c->phys_proc_id = c->initial_apicid; |
| } |
| |
| get_model_name(c); /* Default name */ |
| |
| detect_nopl(c); |
| |
| detect_null_seg_behavior(c); |
| |
| /* |
| * ESPFIX is a strange bug. All real CPUs have it. Paravirt |
| * systems that run Linux at CPL > 0 may or may not have the |
| * issue, but, even if they have the issue, there's absolutely |
| * nothing we can do about it because we can't use the real IRET |
| * instruction. |
| * |
| * NB: For the time being, only 32-bit kernels support |
| * X86_BUG_ESPFIX as such. 64-bit kernels directly choose |
| * whether to apply espfix using paravirt hooks. If any |
| * non-paravirt system ever shows up that does *not* have the |
| * ESPFIX issue, we can change this. |
| */ |
| #ifdef CONFIG_X86_32 |
| # ifdef CONFIG_PARAVIRT |
| do { |
| extern void native_iret(void); |
| if (pv_cpu_ops.iret == native_iret) |
| set_cpu_bug(c, X86_BUG_ESPFIX); |
| } while (0); |
| # else |
| set_cpu_bug(c, X86_BUG_ESPFIX); |
| # endif |
| #endif |
| } |
| |
| static void x86_init_cache_qos(struct cpuinfo_x86 *c) |
| { |
| /* |
| 	 * The heavy lifting of max_rmid and cache_occ_scale is handled in |
| 	 * get_cpu_cap(). Here we just clamp boot_cpu_data's max_rmid to this |
| 	 * CPU's value, in case the CQM bits really aren't there on this CPU. |
| */ |
| if (c != &boot_cpu_data) { |
| boot_cpu_data.x86_cache_max_rmid = |
| min(boot_cpu_data.x86_cache_max_rmid, |
| c->x86_cache_max_rmid); |
| } |
| } |
| |
| /* |
| * Validate that ACPI/mptables have the same information about the |
| * effective APIC id and update the package map. |
| */ |
| static void validate_apic_and_package_id(struct cpuinfo_x86 *c) |
| { |
| #ifdef CONFIG_SMP |
| unsigned int apicid, cpu = smp_processor_id(); |
| |
| apicid = apic->cpu_present_to_apicid(cpu); |
| |
| if (apicid != c->apicid) { |
| pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", |
| 		       cpu, apicid, c->apicid); |
| } |
| BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); |
| #else |
| c->logical_proc_id = 0; |
| #endif |
| } |
| |
| /* |
| * This does the hard work of actually picking apart the CPU stuff... |
| */ |
| static void identify_cpu(struct cpuinfo_x86 *c) |
| { |
| int i; |
| |
| c->loops_per_jiffy = loops_per_jiffy; |
| c->x86_cache_size = 0; |
| c->x86_vendor = X86_VENDOR_UNKNOWN; |
| c->x86_model = c->x86_stepping = 0; /* So far unknown... */ |
| c->x86_vendor_id[0] = '\0'; /* Unset */ |
| c->x86_model_id[0] = '\0'; /* Unset */ |
| c->x86_max_cores = 1; |
| c->x86_coreid_bits = 0; |
| c->cu_id = 0xff; |
| #ifdef CONFIG_X86_64 |
| c->x86_clflush_size = 64; |
| c->x86_phys_bits = 36; |
| c->x86_virt_bits = 48; |
| #else |
| c->cpuid_level = -1; /* CPUID not detected */ |
| c->x86_clflush_size = 32; |
| c->x86_phys_bits = 32; |
| c->x86_virt_bits = 32; |
| #endif |
| c->x86_cache_alignment = c->x86_clflush_size; |
| memset(&c->x86_capability, 0, sizeof c->x86_capability); |
| |
| generic_identify(c); |
| |
| if (this_cpu->c_identify) |
| this_cpu->c_identify(c); |
| |
| /* Clear/Set all flags overridden by options, after probe */ |
| apply_forced_caps(c); |
| |
| #ifdef CONFIG_X86_64 |
| c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
| #endif |
| |
| /* |
| * Vendor-specific initialization. In this section we |
| * canonicalize the feature flags, meaning if there are |
| * features a certain CPU supports which CPUID doesn't |
| * tell us, CPUID claiming incorrect flags, or other bugs, |
| * we handle them here. |
| * |
| * At the end of this section, c->x86_capability better |
| * indicate the features this CPU genuinely supports! |
| */ |
| if (this_cpu->c_init) |
| this_cpu->c_init(c); |
| |
| /* Disable the PN if appropriate */ |
| squash_the_stupid_serial_number(c); |
| |
| /* Set up SMEP/SMAP */ |
| setup_smep(c); |
| setup_smap(c); |
| |
| /* |
| * The vendor-specific functions might have changed features. |
| * Now we do "generic changes." |
| */ |
| |
| /* Filter out anything that depends on CPUID levels we don't have */ |
| filter_cpuid_features(c, true); |
| |
| /* If the model name is still unset, do table lookup. */ |
| if (!c->x86_model_id[0]) { |
| const char *p; |
| p = table_lookup_model(c); |
| if (p) |
| strcpy(c->x86_model_id, p); |
| else |
| /* Last resort... */ |
| sprintf(c->x86_model_id, "%02x/%02x", |
| c->x86, c->x86_model); |
| } |
| |
| #ifdef CONFIG_X86_64 |
| detect_ht(c); |
| #endif |
| |
| x86_init_rdrand(c); |
| x86_init_cache_qos(c); |
| setup_pku(c); |
| |
| /* |
| 	 * Clear/Set all flags overridden by options. This needs to be done |
| 	 * before the SMP capability AND across all CPUs below. |
| */ |
| apply_forced_caps(c); |
| |
| /* |
| * On SMP, boot_cpu_data holds the common feature set between |
| * all CPUs; so make sure that we indicate which features are |
| * common between the CPUs. The first time this routine gets |
| * executed, c == &boot_cpu_data. |
| */ |
| if (c != &boot_cpu_data) { |
| /* AND the already accumulated flags with these */ |
| for (i = 0; i < NCAPINTS; i++) |
| boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
| |
| /* OR, i.e. replicate the bug flags */ |
| for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) |
| c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; |
| } |
| |
| /* Init Machine Check Exception if available. */ |
| mcheck_cpu_init(c); |
| |
| select_idle_routine(c); |
| |
| #ifdef CONFIG_NUMA |
| numa_add_cpu(smp_processor_id()); |
| #endif |
| } |
| |
| /* |
| * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions |
| * on 32-bit kernels: |
| */ |
| #ifdef CONFIG_X86_32 |
| void enable_sep_cpu(void) |
| { |
| struct tss_struct *tss; |
| int cpu; |
| |
| if (!boot_cpu_has(X86_FEATURE_SEP)) |
| return; |
| |
| cpu = get_cpu(); |
| tss = &per_cpu(cpu_tss_rw, cpu); |
| |
| /* |
| * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- |
| * see the big comment in struct x86_hw_tss's definition. |
| */ |
| |
| tss->x86_tss.ss1 = __KERNEL_CS; |
| wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); |
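| 	/* |
| 	 * cpu_entry_stack(cpu) + 1 points just past the entry stack, i.e. its |
| 	 * top, which is where SYSENTER should start the kernel stack pointer. |
| 	 */ |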
| wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); |
| wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); |
| |
| put_cpu(); |
| } |
| #endif |
| |
| void __init identify_boot_cpu(void) |
| { |
| identify_cpu(&boot_cpu_data); |
| #ifdef CONFIG_X86_32 |
| sysenter_setup(); |
| enable_sep_cpu(); |
| #endif |
| cpu_detect_tlb(&boot_cpu_data); |
| tsx_init(); |
| } |
| |
| void identify_secondary_cpu(struct cpuinfo_x86 *c) |
| { |
| BUG_ON(c == &boot_cpu_data); |
| identify_cpu(c); |
| #ifdef CONFIG_X86_32 |
| enable_sep_cpu(); |
| #endif |
| mtrr_ap_init(); |
| validate_apic_and_package_id(c); |
| x86_spec_ctrl_setup_ap(); |
| update_srbds_msr(); |
| if (boot_cpu_has_bug(X86_BUG_GDS)) |
| update_gds_msr(); |
| } |
| |
| static __init int setup_noclflush(char *arg) |
| { |
| setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); |
| setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); |
| return 1; |
| } |
| __setup("noclflush", setup_noclflush); |
| |
| void print_cpu_info(struct cpuinfo_x86 *c) |
| { |
| const char *vendor = NULL; |
| |
| if (c->x86_vendor < X86_VENDOR_NUM) { |
| vendor = this_cpu->c_vendor; |
| } else { |
| if (c->cpuid_level >= 0) |
| vendor = c->x86_vendor_id; |
| } |
| |
| if (vendor && !strstr(c->x86_model_id, vendor)) |
| pr_cont("%s ", vendor); |
| |
| if (c->x86_model_id[0]) |
| pr_cont("%s", c->x86_model_id); |
| else |
| pr_cont("%d86", c->x86); |
| |
| pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); |
| |
| if (c->x86_stepping || c->cpuid_level >= 0) |
| pr_cont(", stepping: 0x%x)\n", c->x86_stepping); |
| else |
| pr_cont(")\n"); |
| } |
| |
| /* |
| * clearcpuid= was already parsed in fpu__init_parse_early_param. |
| * But we need to keep a dummy __setup around otherwise it would |
| * show up as an environment variable for init. |
| */ |
| static __init int setup_clearcpuid(char *arg) |
| { |
| return 1; |
| } |
| __setup("clearcpuid=", setup_clearcpuid); |
| |
| #ifdef CONFIG_X86_64 |
| DEFINE_PER_CPU_FIRST(union irq_stack_union, |
| irq_stack_union) __aligned(PAGE_SIZE) __visible; |
| |
| /* |
| * The following percpu variables are hot. Align current_task to |
| * cacheline size such that they fall in the same cacheline. |
| */ |
| DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = |
| &init_task; |
| EXPORT_PER_CPU_SYMBOL(current_task); |
| |
| DEFINE_PER_CPU(char *, irq_stack_ptr) = |
| init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE; |
| |
| DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; |
| |
| DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
| EXPORT_PER_CPU_SYMBOL(__preempt_count); |
| |
| /* May not be marked __init: used by software suspend */ |
| void syscall_init(void) |
| { |
| extern char _entry_trampoline[]; |
| extern char entry_SYSCALL_64_trampoline[]; |
| |
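| 	/* |
| 	 * The cpu_entry_area trampoline page is a mapping of the kernel's |
| 	 * trampoline page, so the SYSCALL entry inside it sits at the same |
| 	 * offset that entry_SYSCALL_64_trampoline has within |
| 	 * _entry_trampoline, which is what the pointer arithmetic below |
| 	 * computes. |
| 	 */ |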
| int cpu = smp_processor_id(); |
| unsigned long SYSCALL64_entry_trampoline = |
| (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline + |
| (entry_SYSCALL_64_trampoline - _entry_trampoline); |
| |
| wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); |
| if (static_cpu_has(X86_FEATURE_PTI)) |
| wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); |
| else |
| wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); |
| |
| #ifdef CONFIG_IA32_EMULATION |
| wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); |
| /* |
| * This only works on Intel CPUs. |
| 	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP. |
| * This does not cause SYSENTER to jump to the wrong location, because |
| * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). |
| */ |
| wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); |
| wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); |
| wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); |
| #else |
| wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); |
| wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); |
| wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); |
| wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); |
| #endif |
| |
| /* Flags to clear on syscall */ |
| wrmsrl(MSR_SYSCALL_MASK, |
| X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| |
| X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); |
| } |
| |
| /* |
| * Copies of the original ist values from the tss are only accessed during |
| * debugging, no special alignment required. |
| */ |
| DEFINE_PER_CPU(struct orig_ist, orig_ist); |
| |
| static DEFINE_PER_CPU(unsigned long, debug_stack_addr); |
| DEFINE_PER_CPU(int, debug_stack_usage); |
| |
| int is_debug_stack(unsigned long addr) |
| { |
| return __this_cpu_read(debug_stack_usage) || |
| (addr <= __this_cpu_read(debug_stack_addr) && |
| addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); |
| } |
| NOKPROBE_SYMBOL(is_debug_stack); |
| |
| DEFINE_PER_CPU(u32, debug_idt_ctr); |
| |
| void debug_stack_set_zero(void) |
| { |
| this_cpu_inc(debug_idt_ctr); |
| load_current_idt(); |
| } |
| NOKPROBE_SYMBOL(debug_stack_set_zero); |
| |
| void debug_stack_reset(void) |
| { |
| if (WARN_ON(!this_cpu_read(debug_idt_ctr))) |
| return; |
| if (this_cpu_dec_return(debug_idt_ctr) == 0) |
| load_current_idt(); |
| } |
| NOKPROBE_SYMBOL(debug_stack_reset); |
| |
| #else /* CONFIG_X86_64 */ |
| |
| DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
| EXPORT_PER_CPU_SYMBOL(current_task); |
| DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
| EXPORT_PER_CPU_SYMBOL(__preempt_count); |
| |
| /* |
| * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find |
| * the top of the kernel stack. Use an extra percpu variable to track the |
| * top of the kernel stack directly. |
| */ |
| DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = |
| (unsigned long)&init_thread_union + THREAD_SIZE; |
| EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); |
| |
| #ifdef CONFIG_CC_STACKPROTECTOR |
| DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
| #endif |
| |
| #endif /* CONFIG_X86_64 */ |
| |
| /* |
| * Clear all 6 debug registers: |
| */ |
| static void clear_all_debug_regs(void) |
| { |
| int i; |
| |
| for (i = 0; i < 8; i++) { |
| /* Ignore db4, db5 */ |
| if ((i == 4) || (i == 5)) |
| continue; |
| |
| set_debugreg(0, i); |
| } |
| } |
| |
| #ifdef CONFIG_KGDB |
| /* |
| * Restore debug regs if using kgdbwait and you have a kernel debugger |
| * connection established. |
| */ |
| static void dbg_restore_debug_regs(void) |
| { |
| if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) |
| arch_kgdb_ops.correct_hw_break(); |
| } |
| #else /* ! CONFIG_KGDB */ |
| #define dbg_restore_debug_regs() |
| #endif /* ! CONFIG_KGDB */ |
| |
| static void wait_for_master_cpu(int cpu) |
| { |
| #ifdef CONFIG_SMP |
| /* |
| * wait for ACK from master CPU before continuing |
| * with AP initialization |
| */ |
| WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); |
| while (!cpumask_test_cpu(cpu, cpu_callout_mask)) |
| cpu_relax(); |
| #endif |
| } |
| |
| /* |
| * cpu_init() initializes state that is per-CPU. Some data is already |
| * initialized (naturally) in the bootstrap process, such as the GDT |
| * and IDT. We reload them nevertheless, this function acts as a |
| * 'CPU state barrier', nothing should get across. |
| * A lot of state is already set up in PDA init for 64 bit |
| */ |
| #ifdef CONFIG_X86_64 |
| |
| void cpu_init(void) |
| { |
| struct orig_ist *oist; |
| struct task_struct *me; |
| struct tss_struct *t; |
| unsigned long v; |
| int cpu = raw_smp_processor_id(); |
| int i; |
| |
| wait_for_master_cpu(cpu); |
| |
| /* |
| * Initialize the CR4 shadow before doing anything that could |
| * try to read it. |
| */ |
| cr4_init_shadow(); |
| |
| if (cpu) |
| load_ucode_ap(); |
| |
| t = &per_cpu(cpu_tss_rw, cpu); |
| oist = &per_cpu(orig_ist, cpu); |
| |
| #ifdef CONFIG_NUMA |
| if (this_cpu_read(numa_node) == 0 && |
| early_cpu_to_node(cpu) != NUMA_NO_NODE) |
| set_numa_node(early_cpu_to_node(cpu)); |
| #endif |
| |
| me = current; |
| |
| pr_debug("Initializing CPU#%d\n", cpu); |
| |
| cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| |
| /* |
| * Initialize the per-CPU GDT with the boot GDT, |
| * and set up the GDT descriptor: |
| */ |
| |
| switch_to_new_gdt(cpu); |
| loadsegment(fs, 0); |
| |
| load_current_idt(); |
| |
| memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); |
| syscall_init(); |
| |
| wrmsrl(MSR_FS_BASE, 0); |
| wrmsrl(MSR_KERNEL_GS_BASE, 0); |
| barrier(); |
| |
| x86_configure_nx(); |
| x2apic_setup(); |
| |
| /* |
| * set up and load the per-CPU TSS |
| */ |
| if (!oist->ist[0]) { |
| char *estacks = get_cpu_entry_area(cpu)->exception_stacks; |
| |
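| 		/* |
| 		 * Note that estacks is advanced before being recorded: stacks |
| 		 * grow down, so each IST slot ends up pointing at the top |
| 		 * (highest address) of its exception stack. |
| 		 */ |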
| for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
| estacks += exception_stack_sizes[v]; |
| oist->ist[v] = t->x86_tss.ist[v] = |
| (unsigned long)estacks; |
| if (v == DEBUG_STACK-1) |
| per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; |
| } |
| } |
| |
| t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
| |
| /* |
| * <= is required because the CPU will access up to |
| * 8 bits beyond the end of the IO permission bitmap. |
| */ |
| for (i = 0; i <= IO_BITMAP_LONGS; i++) |
| t->io_bitmap[i] = ~0UL; |
| |
| mmgrab(&init_mm); |
| me->active_mm = &init_mm; |
| BUG_ON(me->mm); |
| initialize_tlbstate_and_flush(); |
| enter_lazy_tlb(&init_mm, me); |
| |
| /* |
| * Initialize the TSS. sp0 points to the entry trampoline stack |
| * regardless of what task is running. |
| */ |
| set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); |
| load_TR_desc(); |
| load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); |
| |
| load_mm_ldt(&init_mm); |
| |
| clear_all_debug_regs(); |
| dbg_restore_debug_regs(); |
| |
| if (is_uv_system()) |
| uv_cpu_init(); |
| |
| load_fixmap_gdt(cpu); |
| } |
| |
| #else |
| |
| void cpu_init(void) |
| { |
| int cpu = smp_processor_id(); |
| struct task_struct *curr = current; |
| struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); |
| |
| wait_for_master_cpu(cpu); |
| |
| /* |
| * Initialize the CR4 shadow before doing anything that could |
| * try to read it. |
| */ |
| cr4_init_shadow(); |
| |
| show_ucode_info_early(); |
| |
| pr_info("Initializing CPU#%d\n", cpu); |
| |
| if (cpu_feature_enabled(X86_FEATURE_VME) || |
| boot_cpu_has(X86_FEATURE_TSC) || |
| boot_cpu_has(X86_FEATURE_DE)) |
| cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| |
| load_current_idt(); |
| switch_to_new_gdt(cpu); |
| |
| /* |
| * Set up and load the per-CPU TSS and LDT |
| */ |
| mmgrab(&init_mm); |
| curr->active_mm = &init_mm; |
| BUG_ON(curr->mm); |
| initialize_tlbstate_and_flush(); |
| enter_lazy_tlb(&init_mm, curr); |
| |
| /* |
| * Initialize the TSS. Don't bother initializing sp0, as the initial |
| * task never enters user mode. |
| */ |
| set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); |
| load_TR_desc(); |
| |
| load_mm_ldt(&init_mm); |
| |
| t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
| |
| #ifdef CONFIG_DOUBLEFAULT |
| /* Set up doublefault TSS pointer in the GDT */ |
| __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
| #endif |
| |
| clear_all_debug_regs(); |
| dbg_restore_debug_regs(); |
| |
| load_fixmap_gdt(cpu); |
| } |
| #endif |
| |
| static void bsp_resume(void) |
| { |
| if (this_cpu->c_bsp_resume) |
| this_cpu->c_bsp_resume(&boot_cpu_data); |
| } |
| |
| static struct syscore_ops cpu_syscore_ops = { |
| .resume = bsp_resume, |
| }; |
| |
| static int __init init_cpu_syscore(void) |
| { |
| register_syscore_ops(&cpu_syscore_ops); |
| return 0; |
| } |
| core_initcall(init_cpu_syscore); |
| |
| /* |
| * The microcode loader calls this upon late microcode load to recheck features, |
| * only when microcode has been updated. Caller holds microcode_mutex and CPU |
| * hotplug lock. |
| */ |
| void microcode_check(void) |
| { |
| struct cpuinfo_x86 info; |
| |
| perf_check_microcode(); |
| |
| /* Reload CPUID max function as it might've changed. */ |
| info.cpuid_level = cpuid_eax(0); |
| |
| /* |
| * Copy all capability leafs to pick up the synthetic ones so that |
| * memcmp() below doesn't fail on that. The ones coming from CPUID will |
| * get overwritten in get_cpu_cap(). |
| */ |
| memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); |
| |
| get_cpu_cap(&info); |
| |
| if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) |
| return; |
| |
| pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); |
| pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); |
| } |
| |
| void __init arch_cpu_finalize_init(void) |
| { |
| identify_boot_cpu(); |
| |
| /* |
| * identify_boot_cpu() initialized SMT support information, let the |
| * core code know. |
| */ |
| cpu_smt_check_topology(); |
| |
| if (!IS_ENABLED(CONFIG_SMP)) { |
| pr_info("CPU: "); |
| print_cpu_info(&boot_cpu_data); |
| } |
| |
| cpu_select_mitigations(); |
| |
| arch_smt_update(); |
| |
| if (IS_ENABLED(CONFIG_X86_32)) { |
| /* |
| 		 * Check whether this is a real i386, which is no longer |
| 		 * supported, and fix up the utsname. |
| */ |
| if (boot_cpu_data.x86 < 4) |
| panic("Kernel requires i486+ for 'invlpg' and other features"); |
| |
| init_utsname()->machine[1] = |
| '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); |
| } |
| |
| /* |
| * Must be before alternatives because it might set or clear |
| * feature bits. |
| */ |
| fpu__init_system(); |
| fpu__init_cpu(); |
| |
| alternative_instructions(); |
| |
| if (IS_ENABLED(CONFIG_X86_64)) { |
| /* |
| 		 * Make sure the first 2MB area is not mapped by huge pages. |
| 		 * There are typically fixed size MTRRs in there and overlapping |
| 		 * MTRRs into large pages causes slowdowns. |
| * |
| * Right now we don't do that with gbpages because there seems |
| * very little benefit for that case. |
| */ |
| if (!direct_gbpages) |
| set_memory_4k((unsigned long)__va(0), 1); |
| } else { |
| fpu__init_check_bugs(); |
| } |
| |
| /* |
| * This needs to be called before any devices perform DMA |
| * operations that might use the SWIOTLB bounce buffers. It will |
| * mark the bounce buffers as decrypted so that their usage will |
| * not cause "plain-text" data to be decrypted when accessed. It |
| * must be called after late_time_init() so that Hyper-V x86/x64 |
| * hypercalls work when the SWIOTLB bounce buffers are decrypted. |
| */ |
| mem_encrypt_init(); |
| } |