/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

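/*
 * Parse the "vdso=" command line parameter: "on" and "off" are
 * accepted, as is any numeric value (0 disables, non-zero enables).
 */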
static int __init vdso_setup(char *s)
{
        unsigned long val;
        int rc;

        rc = 0;
        if (strncmp(s, "on", 3) == 0)
                vdso_enabled = 1;
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
                rc = strict_strtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
        struct vdso_data data;
        u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
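        /*
         * The ECTG-based fast path needs the per-cpu vdso page, which
         * is unreachable in home-space mode; facility bit 31 indicates
         * that the ECTG instruction is available.
         */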
        vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Set up the per-cpu vdso data page.
 */
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}

/*
 * Allocate/free per cpu vdso data.
 */
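/*
 * A segment table on s390 has 2048 eight-byte entries, i.e. it spans
 * four pages, hence the allocation order of 2.
 */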
#define SEGMENT_ORDER 2

int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_RO + page_frame;

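        /*
         * Only the first 2KB of the page table page are used for the
         * 256 page table entries; the PSAL and ASTE needed for
         * access-register mode addressing live in the upper half.
         */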
        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

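        /*
         * Wire it all up: paste entry 4 in the lowcore points at the
         * PSAL, the PSAL at the ASTE, and the ASTE at the segment table.
         */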
        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x20000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;

        vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
        return 0;

out:
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}

void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
                return;

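        /* Walk the tables back from the lowcore paste entry and free them. */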
        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

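/*
 * Control register 5 is loaded with the offset of the paste area in
 * the lowcore; access-register translation uses it to find the access
 * list set up in vdso_alloc_per_cpu().
 */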
static void __vdso_init_cr5(void *dummy)
{
        unsigned long cr5;

        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
}

static void vdso_init_cr5(void)
{
        if (user_mode != HOME_SPACE_MODE && vdso_enabled)
                on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

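        /*
         * Pick the pagelist that matches the task: on a 64-bit kernel,
         * compat tasks get the 32-bit vdso, all others the native one.
         */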
#ifdef CONFIG_64BIT
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
#endif

        /*
         * The vDSO had a problem and was disabled; just don't "enable"
         * it for the process.
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * Pick a base address for the vDSO in process space. We try to
         * put it at vdso_base, which is the "natural" base for it, but
         * we might fail and end up putting it elsewhere.
         */
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Put the vDSO base into the mm struct. We need to do this before
         * calling install_special_mapping or the perf counter mmap tracking
         * code will fail to recognise it as a vDSO (since arch_vma_name
         * fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to those pages.
         * gdb can break that via the ptrace interface and thus trigger
         * COW on those pages, but it is then your responsibility never
         * to do that on the "data" page of the vDSO, or you'll stop
         * getting kernel updates and your nice userland gettimeofday
         * will be totally dead. It's fine to use it for setting
         * breakpoints in the vDSO code pages, though.
         *
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortems fully interpretable
         * later without having to match up the same kernel and hardware
         * config to see what PC values meant.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                     VM_ALWAYSDUMP,
                                     vdso_pagelist);
        if (rc)
                current->mm->context.vdso_base = 0;
out_up:
        up_write(&mm->mmap_sem);
        return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
        /* Calculate the size of the 32 bit vDSO */
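        /* One extra page is reserved for the shared vdso data page. */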
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
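        /*
         * With CONFIG_SMP the per-cpu areas are set up during cpu
         * bringup; on non-SMP kernels, set up the boot cpu here.
         */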
#ifndef CONFIG_SMP
        if (vdso_alloc_per_cpu(0, &S390_lowcore))
                BUG();
#endif
        vdso_init_cr5();
#endif /* CONFIG_64BIT */

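        /* Take a reference on the shared vdso data page. */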
        get_page(virt_to_page(vdso_data));

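        /* Make sure the above stores are visible before the vdso is used. */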
        smp_wmb();

        return 0;
}
arch_initcall(vdso_init);

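/*
 * s390 has no "gate area" (vsyscall page); the generic code expects
 * these stubs nevertheless.
 */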
int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}