/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

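/*
 * Convert a guest real address into a host userspace pointer. Addresses in
 * the first two pages and in the vcpu's prefix area are swapped (low-core
 * prefixing) before gmap_fault() resolves the corresponding user address
 * through the guest mapping; callers check the result with IS_ERR().
 */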
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix  = vcpu->arch.sie_block->prefix;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

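/*
 * Read a single naturally aligned value from guest memory. The guest
 * address is translated with __guestaddr_to_user() and the value is
 * fetched with get_user(); an error pointer from the translation is
 * propagated as a negative return code.
 */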
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR(uptr))
                return PTR_ERR(uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}

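/*
 * Write a single naturally aligned value to guest memory, mirroring the
 * get_guest_u* helpers above but using put_user().
 */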
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}

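/*
 * Slow path: copy byte by byte through put_guest_u8() so that every byte
 * gets the prefix handling of __guestaddr_to_user(). copy_to_guest() uses
 * this when a copy crosses the low-core or prefix area boundaries.
 */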
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}

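/*
 * Fast path: resolve the destination with gmap_fault() and copy in
 * segment-sized (PMD_SIZE) chunks, so that each chunk stays within the
 * single segment table entry resolved by one gmap_fault() call.
 */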
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestdest + n < guestdest)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestdest & ~PMD_MASK);

        r = copy_to_user(uptr, from, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        from += size;
        n -= size;
        guestdest += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                from += PMD_SIZE;
                n -= PMD_SIZE;
                guestdest += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}

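/*
 * Copy to a guest absolute address: no prefix handling is applied, the
 * destination is passed to the fast path as-is.
 */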
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         void *from, unsigned long n)
{
        return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

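/*
 * Copy to a guest real address. Copies that cross the low-core or prefix
 * area boundaries fall back to the byte-wise slow path; otherwise the
 * prefix is applied up front and the segment-wise fast path is used.
 */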
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                void *from, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

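/*
 * Slow path: read byte by byte through get_guest_u8(), the counterpart of
 * __copy_to_guest_slow().
 */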
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

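/*
 * Fast path: the read-side counterpart of __copy_to_guest_fast(), copying
 * in PMD_SIZE chunks so that each chunk lies within a single segment
 * resolved by one gmap_fault() call.
 */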
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestsrc & ~PMD_MASK);

        r = copy_from_user(to, uptr, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        to += size;
        n -= size;
        guestsrc += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                to += PMD_SIZE;
                n -= PMD_SIZE;
                guestsrc += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}

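/*
 * Copy from a guest absolute address: no prefix handling is applied.
 */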
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

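/*
 * Copy from a guest real address, choosing between the fast and the slow
 * path in the same way as copy_to_guest().
 */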
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif