#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "types.h"

#include "x86.h"

#define KVM_MAX_VCPUS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
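
/*
 * Requests are posted by setting a bit in vcpu->requests and consumed by
 * the vcpu thread before it re-enters the guest.  A rough sketch of the
 * intended use (the actual producers and consumers live elsewhere in KVM):
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush the guest TLB;
 */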

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last
 * words :), so until then it will suffice.  At least it's abstracted so
 * we can change it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
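
/*
 * Rough usage sketch (the device names below are illustrative, not taken
 * from this header): an emulated device embeds a struct kvm_io_device,
 * fills in its callbacks and registers it on the PIO or MMIO bus, e.g.
 *
 *	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
 *
 * On each guest I/O access, kvm_io_bus_find_dev() maps the port or guest
 * physical address back to the owning device.
 */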

struct kvm_vcpu {
	struct kvm *kvm;
	struct preempt_notifier preempt_notifier;
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	struct kvm_run *run;
	int guest_mode;
	unsigned long requests;
	struct kvm_guest_debug guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

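/*
 * A memory slot describes one contiguous range of guest physical memory:
 * npages pages starting at guest frame base_gfn, backed by host userspace
 * memory starting at userspace_addr.  rmap holds one reverse-mapping entry
 * per page; dirty_bitmap (one bit per page) is only present while dirty
 * logging is enabled for the slot.
 */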
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	int user_alloc;
};

struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)
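
/*
 * Typical call, e.g. from the MSR emulation code (illustrative):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */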

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt ...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

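/*
 * vcpu_load() acquires vcpu->mutex and loads the vcpu's state onto the
 * current physical CPU (via kvm_arch_vcpu_load()); vcpu_put() is the
 * counterpart that unloads the state and drops the mutex.  Per-vcpu ioctl
 * handling is wrapped in a vcpu_load()/vcpu_put() pair.
 */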
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
void kvm_exit(void);

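/*
 * Error values of hpa_t are encoded in the top bit: HPA_ERR_MASK can be
 * or'ed into a translation result to mark it invalid, and is_error_hpa()
 * tests for that bit.
 */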
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
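
/*
 * Guest memory access helpers.  A rough usage sketch (u32 val is just an
 * example variable):
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)) < 0)
 *		handle the fault;
 *	kvm_write_guest(kvm, gpa, &val, sizeof(val));
 *
 * gfn_to_page() returns a reference to the backing struct page; drop it
 * with kvm_release_page_clean() or kvm_release_page_dirty() depending on
 * whether the page was written.
 */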
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

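/*
 * The kvm_arch_*() hooks below form the boundary between the generic code
 * (kvm_main.c) and the architecture-specific implementation (x86.c in this
 * tree): module init/exit, per-vcpu setup and teardown, and hardware
 * enable/disable on each physical CPU.
 */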
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);

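/*
 * kvm_guest_enter()/kvm_guest_exit() bracket actual guest execution: they
 * flush the accumulated system time and set/clear PF_VCPU so that time
 * spent running the guest can be accounted as guest time rather than
 * ordinary system time.
 */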
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

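/*
 * Statistics exported via debugfs.  Each entry names one counter; offset
 * is expected to be the offsetof() of that counter within struct kvm
 * (KVM_STAT_VM) or struct kvm_vcpu (KVM_STAT_VCPU), as selected by kind.
 * The table itself is supplied by the architecture code.
 */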
struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];

#endif