/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
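/*
 * Walk the PTEs covering [addr, end) under the given pmd, taking a
 * speculative reference on each mapped page and storing it in pages[].
 * Returns 0 (sending the caller to the slow path) if any PTE lacks the
 * required present/protection bits or changes under us, 1 if every
 * page in the range was pinned.
 */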
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

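	/* sun4v (hypervisor) and sun4u CPUs use different PTE bit
	 * layouts, so build the required present/protection bits for
	 * whichever TLB type we are running on.
	 */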
	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
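	/* _PAGE_SPECIAL is part of the mask but not of the expected
	 * result, so special mappings fail the check below and are
	 * handled by the slow path instead.
	 */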
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs.  We could optimize this in the future
		 * to use page_cache_add_speculative() for the hugepage
		 * case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
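		/* We are lockless, so the PTE may have changed while
		 * we took the reference.  Re-read it and drop the ref
		 * if it no longer matches.
		 */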
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		if (head != page)
			get_huge_page_tail(page);

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

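/*
 * Walk the PMD entries covering [addr, end), handing each populated
 * one to gup_pte_range().  An empty PMD sends us back to the slow
 * path.
 */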
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

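/*
 * Same again one level up: walk the PUD entries covering [addr, end)
 * and hand each populated one to gup_pmd_range().
 */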
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

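/*
 * Pin up to nr_pages user pages starting at start into pages[],
 * avoiding mmap_sem in the common case.  write should be 1 if the
 * caller intends to write to the pages.  Returns the number of pages
 * actually pinned.
 *
 * A minimal caller-side sketch (not from this file; uaddr stands for
 * some hypothetical user virtual address):
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, 16, 1, pages);
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */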
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on sparc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

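	/*
	 * The fast path could not pin everything; fall back to the
	 * regular, mmap_sem-taking get_user_pages() for whatever is
	 * left of the range.
	 */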
	{
		int ret;

slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values: if the
		 * fast path already pinned some pages but the slow path
		 * fails, report the count we have rather than the error.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}