#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>

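/*
 * Nothing to do when entering lazy TLB mode: the space registers keep
 * their current contents until the next real switch_mm(), so this hook
 * is deliberately a no-op.
 */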
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* on PA-RISC, we actually have enough contexts to justify an allocator
 * for them. prumpf */

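/*
 * A "space id" (sid) is PA-RISC's address-space tag: it is loaded into
 * a space register and, in protection-id form, into cr8, so TLB entries
 * belonging to different processes can coexist without a flush on every
 * context switch.  alloc_sid()/free_sid() hand these tags out;
 * mm->context holds an mm's sid, with 0 meaning "none allocated yet".
 */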
extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	/* The mm is brand new; nobody else may hold a reference yet. */
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context = alloc_sid();
	return 0;
}

static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context);
	mm->context = 0;	/* 0 doubles as "no sid"; see activate_mm() below */
}

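/*
 * Install a space id in the hardware: sr3 gets the space id itself, and
 * cr8 (PID1) gets the matching protection id.  Bit 0 of a protection id
 * is the write-disable bit, which is why the value is realigned by one
 * bit; SPACEID_SHIFT accounts for how sids are encoded on 32- vs 64-bit
 * kernels.
 */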
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
#if SPACEID_SHIFT == 0
	mtctl(context << 1, 8);
#else
	mtctl(context >> (SPACEID_SHIFT - 1), 8);
#endif
}

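/*
 * Switching address spaces is two steps: cr25 is pointed at the
 * physical address of the new page directory, which the software TLB
 * miss handlers walk, and then the new space id is loaded.  When
 * prev == next there is nothing to do; since TLB entries are tagged by
 * space id, no flush is needed in either case.
 */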
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * activate_mm() is our one chance to allocate a space id
	 * for a new mm created in the exec path.  There's also
	 * some lazy tlb stuff, which is currently dead code, but
	 * we only allocate a space id if one hasn't been allocated
	 * already, so we should be OK.
	 */

	BUG_ON(next == &init_mm);	/* Should never happen */

	if (next->context == 0)
		next->context = alloc_sid();

	switch_mm(prev, next, current);
}
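
/*
 * Rough sketch of how the core kernel reaches the hooks above (for
 * reference only; not part of this header's API, and the exact call
 * chains vary by kernel version):
 *
 *	fork():	dup_mm() -> init_new_context()	- new sid for the child
 *	exec():	exec_mmap() -> activate_mm()	- sid allocated here if
 *						  the mm has none yet
 *	sched:	context_switch() -> switch_mm()	- load pgd + space id
 *	exit():	mmput() -> ... -> destroy_context() - sid freed
 */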
#endif /* __PARISC_MMU_CONTEXT_H */