/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind truly restricted
 *		the allocation to the given memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case node -1 here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied to memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
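
/*
 * Illustrative userspace sketch (not part of this file): how a program
 * might apply the policies above through the syscalls implemented below.
 * Assumes the numaif.h wrappers shipped with numactl/libnuma; addr and
 * len are hypothetical, error handling elided.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = 0x3;	(nodes 0 and 1)
 *
 *	interleave all future allocations of this process over nodes 0-1:
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	bind an existing mapping strictly to node 0:
 *	unsigned long node0 = 0x1;
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_STRICT);
 */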

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>

static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;

#define PDprintk(fmt...)

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
int policy_zone = ZONE_DMA;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
	if (!zl)
		return NULL;
	num = 0;
	for_each_node_mask(nd, *nodes)
		zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		if (nodes_weight(*nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (policy->v.zonelist == NULL) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-ENOMEM);
		}
		break;
	}
	policy->policy = mode;
	return policy;
}

/* Ensure all existing pages follow the policy. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		unsigned int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		nid = page_to_nid(page);
		if (!node_isset(nid, *nodes))
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, nodemask_t *nodes)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
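
/*
 * The four check_* helpers above descend the page tables in the usual
 * four-level pattern; for a range check of, say, 0x2000-0x4000 the call
 * chain is:
 *
 *	check_pgd_range(vma, 0x2000, 0x4000, nodes)
 *	  -> check_pud_range() for each present pud in the range
 *	    -> check_pmd_range() for each present pmd
 *	      -> check_pte_range(), which inspects the individual ptes
 *
 * The walk stops with -EIO as soon as one mapped page sits on a node
 * outside *nodes.
 */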

/* Step 1: check the range */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
	    nodemask_t *nodes, unsigned long flags)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!vma->vm_next && vma->vm_end < end)
			return ERR_PTR(-EFAULT);
		if (prev && prev->vm_end < vma->vm_start)
			return ERR_PTR(-EFAULT);
		if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
			unsigned long endvma = vma->vm_end;
			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
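
/*
 * Worked example for mbind_range(): given one VMA covering 0x1000-0x5000
 * and an mbind() request for 0x2000-0x4000, the loop splits the VMA twice,
 * leaving 0x1000-0x2000 and 0x4000-0x5000 with their old policy and
 * applying the new policy only to the middle VMA 0x2000-0x4000 via
 * policy_vma().
 */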

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	/* Update current mems_allowed */
	cpuset_update_current_mems_allowed();
	/* Ignore nodes not set in current->mems_allowed */
	cpuset_restrict_to_mems_allowed(nodes->bits);
	return mpol_check_policy(mode, nodes);
}

long do_mbind(unsigned long start, unsigned long len,
	      unsigned long mode, nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT)) || mode > MPOL_MAX)
		return -EINVAL;
	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;
	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	if (mpol_check_policy(mode, nmask))
		return -EINVAL;
	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nodes_addr(*nmask)[0]);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask, flags);
	err = PTR_ERR(vma);
	if (!IS_ERR(vma))
		err = mbind_range(vma, start, end, new);
	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/* Set the process memory policy */
long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
				 *nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of online map? */
		if (p->v.preferred_node < 0)
			*nodes = node_online_map;
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
long do_get_mempolicy(int *policy, nodemask_t *nmask,
		      unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_current_mems_allowed();
	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
		return -EINVAL;
	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
			   pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
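
/*
 * Worked example for get_nodes() on a 64-bit kernel (BITS_PER_LONG == 64):
 * a caller passing maxnode == 17 means "bits 0..15 are valid", so after
 * --maxnode we have maxnode == 16, nlongs == 1 and
 * endmask == (1UL << 16) - 1 == 0xffff; everything above bit 15 in the
 * copied word is masked off.
 */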

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			  unsigned long mode,
			  unsigned long __user *nmask, unsigned long maxnode,
			  unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
				  unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				  unsigned long __user *nmask,
				  unsigned long maxnode,
				  unsigned long addr, unsigned long flags)
{
	int err, pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}
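
/*
 * Illustrative userspace sketch: querying the policy in effect at a given
 * address with MPOL_F_ADDR (numaif.h wrapper; the mask here is sized for
 * up to 1024 nodes, and maxnode must be at least the kernel's
 * MAX_NUMNODES for the nmask copy to be accepted):
 *
 *	int mode;
 *	unsigned long mask[16];
 *	get_mempolicy(&mode, mask, sizeof(mask) * 8, addr, MPOL_F_ADDR);
 *
 * On success mode holds the policy (e.g. MPOL_INTERLEAVE) and mask the
 * node set backing the VMA containing addr.
 */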

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
					 compat_ulong_t __user *nmask,
					 compat_ulong_t maxnode,
					 compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
					 compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
				 compat_ulong_t mode, compat_ulong_t __user *nmask,
				 compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/* Return effective policy for a VMA */
struct mempolicy *
get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy &&
			 vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
			       struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
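
/*
 * Worked example for offset_il_node(): with pol->v.nodes = {0,2,5} and
 * off = 7, nnodes = 3 and target = 7 % 3 = 1, so the do/while advances
 * past node 0 to node 2, the second node of the mask. Pages at the same
 * offset therefore always land on the same node, which is the point of
 * static (offset based) interleaving.
 */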

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		off = vma->vm_pgoff;
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

/* Return a zonelist suitable for a huge page allocation. */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
	}
	return zonelist_policy(GFP_HIGHUSER, pol);
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					  unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0]) {
		zone_pcp(zl->zones[0], get_cpu())->interleave_hit++;
		put_cpu();
	}
	return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of
 * the mm_struct of the VMA to prevent it from going away. Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	cpuset_update_current_mems_allowed();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
}
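
/*
 * Sketch of typical in-kernel usage (roughly what an anonymous page fault
 * handler does; vma and address come from the fault):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *	if (!page)
 *		goto oom;
 *
 * The returned page already respects the VMA's mempolicy, so callers do
 * not have to pick a node themselves.
 */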

/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool. When not in
 * interrupt context, apply the current process NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_current_mems_allowed() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_current_mems_allowed();
	if (!pol || in_interrupt())
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmalloc(sz, SLAB_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(new->v.zonelist, old->v.zonelist, sz);
	}
	return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
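
/*
 * Example of the sp_lookup() semantics: with stored ranges [2,5) and [6,9),
 * a lookup for [4,7) first finds some intersecting node ([2,5) or [6,9),
 * depending on tree shape), then walks rb_prev() until the predecessor no
 * longer overlaps start, so the leftmost intersecting node ([2,5)) is
 * returned.
 */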

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	PDprintk("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
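
/*
 * Worked example for shared_policy_replace(): suppose the tree holds one
 * node [0,10) with policy A and we replace [3,6) with policy B. The old
 * node spans the whole new range, so it is trimmed to [0,3), a second
 * node [6,10) carrying policy A is allocated (new2) and inserted, and
 * finally the new node [3,6) with policy B goes in, leaving
 * [0,3)=A, [3,6)=B, [6,10)=A.
 */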

int mpol_set_shared_policy(struct shared_policy *info,
			   struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL, NULL);

	/* Set interleaving policy for system init. This way not all
	   the data structures allocated at system boot end up in node zero. */

	if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
			  const nodemask_t *new)
{
	nodemask_t tmp;

	if (!pol)
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *old, *new);
		pol->v.nodes = tmp;
		current->il_next = node_remap(current->il_next, *old, *new);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   *old, *new);
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set((*z)->zone_pgdat->node_id, nodes);
		nodes_remap(tmp, nodes, *old, *new);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If no mem, then zonelist is NULL and we keep old zonelist.
		 * If that old zonelist has no remaining mems_allowed nodes,
		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (zonelist) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		break;
	}
	default:
		BUG();
		break;
	}
}

/*
 * Someone moved this task to different nodes. Fixup mempolicies.
 *
 * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
 * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
 */
void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
{
	rebind_policy(current->mempolicy, old, new);
}
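
/*
 * Worked example for rebind_policy(): if a task interleaving over nodes
 * {0,1} is moved by cpusets from mems {0,1} to mems {2,3}, nodes_remap()
 * maps node 0 -> 2 and node 1 -> 3, so pol->v.nodes becomes {2,3} and
 * il_next is translated the same way.
 */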