blob: d08e6d7d533b049a607fbce4513c903acdc73472 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ralf Baechleb99fbc12012-09-06 11:29:53 +02002#include <linux/compiler.h>
Paul Gortmakerd9ba5772016-08-21 15:58:14 -04003#include <linux/init.h>
4#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/highmem.h>
Yoichi Yuasa52ab3202010-02-20 21:23:22 +09006#include <linux/sched.h>
Ralf Baechle631330f2009-06-19 14:05:26 +01007#include <linux/smp.h>
Ralf Baechlebb86bf22009-04-25 11:25:34 +02008#include <asm/fixmap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#include <asm/tlbflush.h>
10
Ralf Baechlebb86bf22009-04-25 11:25:34 +020011static pte_t *kmap_pte;
12
13unsigned long highstart_pfn, highend_pfn;
14
/*
 * kmap - establish a long-lived kernel mapping of @page.
 *
 * May sleep (process context only).  Lowmem pages already have a
 * permanent mapping and are returned directly; highmem pages are
 * mapped via kmap_high() and the new translation is flushed from
 * the TLB before the address is handed back.
 */
void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();

	if (PageHighMem(page)) {
		vaddr = kmap_high(page);
		flush_tlb_one((unsigned long)vaddr);
		return vaddr;
	}

	return page_address(page);
}
EXPORT_SYMBOL(kmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
/*
 * kunmap - release a mapping taken with kmap().
 *
 * Must not be called from interrupt context.  Only highmem pages
 * carry a kmap refcount that needs dropping via kunmap_high();
 * lowmem pages are permanently mapped and need no action.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());

	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
38/*
39 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
40 * no global lock is needed and because the kmap code must perform a global TLB
41 * invalidation when the kmap pool wraps.
42 *
 43 * However when holding an atomic kmap it is not legal to sleep, so atomic
44 * kmaps are appropriate for short, tight code paths only.
45 */
46
/*
 * kmap_atomic - map a highmem page into a per-CPU fixmap slot.
 *
 * Disables preemption and pagefaults, so the returned mapping is valid
 * only on the current CPU and the caller must not sleep until the
 * matching kunmap_atomic().  Lowmem pages are returned directly via
 * page_address() with preemption/pagefaults still disabled (the unmap
 * path re-enables them).
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Grab the next free per-CPU atomic-kmap slot index. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be vacant; a live pte here means a missed kunmap. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	/* Slot is private to this CPU, so a local TLB flush suffices. */
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
/*
 * __kunmap_atomic - tear down a mapping created by kmap_atomic().
 *
 * Addresses below FIXADDR_START came from the lowmem early-return path
 * in kmap_atomic(); for those only the preempt/pagefault state is
 * restored.  For fixmap addresses the slot index is popped and, on
 * debug builds, the pte is cleared and flushed so stale use Oopses.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { // FIXME
		/* Lowmem mapping: nothing was installed in the fixmap. */
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		/* Address must match this CPU's slot for the popped type. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101
Ralf Baechle60080262005-07-11 20:45:51 +0000102/*
103 * This is the same as kmap_atomic() but can map memory that doesn't
104 * have a struct page associated with it.
105 */
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700106void *kmap_atomic_pfn(unsigned long pfn)
Ralf Baechle60080262005-07-11 20:45:51 +0000107{
Ralf Baechle60080262005-07-11 20:45:51 +0000108 unsigned long vaddr;
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700109 int idx, type;
Ralf Baechle60080262005-07-11 20:45:51 +0000110
David Hildenbrand2cb7c9c2015-05-11 17:52:09 +0200111 preempt_disable();
Peter Zijlstraa8663742006-12-06 20:32:20 -0800112 pagefault_disable();
Ralf Baechle60080262005-07-11 20:45:51 +0000113
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700114 type = kmap_atomic_idx_push();
Ralf Baechle60080262005-07-11 20:45:51 +0000115 idx = type + KM_TYPE_NR*smp_processor_id();
116 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
Ralf Baechlebb86bf22009-04-25 11:25:34 +0200117 set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
Ralf Baechle60080262005-07-11 20:45:51 +0000118 flush_tlb_one(vaddr);
119
120 return (void*) vaddr;
121}
122
Ralf Baechlebb86bf22009-04-25 11:25:34 +0200123void __init kmap_init(void)
124{
125 unsigned long kmap_vstart;
126
127 /* cache the first kmap pte */
128 kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
129 kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
130}