/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

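/*
 * Parse the "vdso=" kernel command line option: "on" and "off" are
 * accepted literally, anything else is parsed as a number and mapped
 * to 0 (disabled) or non-zero (enabled).
 */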
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page. The union pads struct vdso_data out to exactly
 * one page.
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31); /* ECTG instruction available */
}

/*
 * Allocate/free per cpu vdso data.
 */
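/*
 * An order-2 allocation (4 pages, 16KB) holds a full segment table:
 * 2048 entries of 8 bytes each.
 */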
#define SEGMENT_ORDER	2

int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

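	/*
	 * Build a miniature address space for this cpu: a segment
	 * table with one valid entry, a page table with one read-only
	 * entry, and a single zeroed data page. The vdso code reaches
	 * this page through access-register mode.
	 */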
	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

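	/*
	 * The 2KB page table uses only the first half of its page; the
	 * space behind it holds a hand-built access list (psal) and
	 * ASN-second-table entry (aste) through which access-register
	 * translation finds the segment table above.
	 */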
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
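	/*
	 * free_page()/free_pages() ignore a zero address, so this path
	 * is safe even when only some of the allocations succeeded.
	 */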
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

	/* Walk back through the tables built by vdso_alloc_per_cpu(). */
	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

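/*
 * Point control register 5 at the paste array in the lowcore. The
 * lowcore is prefixed to address 0 on each cpu, so the field offset
 * is also its address; access-register translation starts from here
 * to find the per-cpu vdso data page.
 */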
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
	/*
	 * If the vDSO had a problem and was disabled, just don't
	 * "enable" it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * Pick a base address for the vDSO in process space. There is
	 * no preferred location on s390, so simply take the first
	 * sufficiently large free area.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages. gdb can break that
	 * with the ptrace interface and thus trigger COW on them, but
	 * it's then your responsibility never to do that on the "data"
	 * page of the vDSO, or you'll stop getting kernel updates and
	 * your nice userland gettimeofday will be totally dead. It's
	 * fine to use it for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

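/*
 * Name the vdso vma so that /proc/<pid>/maps and the perf mmap
 * tracking code report it as "[vdso]".
 */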
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

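/*
 * One-time setup: initialize the vdso data page, build the pagelists
 * backing the 32 bit (if CONFIG_COMPAT) and 64 bit vdso images, and
 * set up the per-cpu data for the boot cpu.
 */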
static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO; the extra page holds
	 * the shared vdso data */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO; the extra page holds
	 * the shared vdso data */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

	/* Take an extra reference on the shared vdso data page. */
	get_page(virt_to_page(vdso_data));

	/* Make the vdso pagelists and data visible before any process
	 * can map the vdso. */
	smp_wmb();

	return 0;
}
early_initcall(vdso_init);