KVM: Introduce kvm_memory_slot::arch and move lpage_info into it
Some members of kvm_memory_slot are not used by every architecture.

This patch is the first step towards making that difference explicit:
it introduces kvm_memory_slot::arch and moves lpage_info into it.
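
As a minimal illustration (not part of the patch itself), the access-path
change visible in the mmu.c hunk below is:

	/* before: lpage_info lived directly in the common struct */
	return &slot->lpage_info[level - 2][idx];

	/* after: it is reached through the new arch member */
	return &slot->arch.lpage_info[level - 2][idx];

Architectures that keep no per-slot data (ia64, powerpc and s390 here)
define an empty struct kvm_arch_memory_slot and add no-op
kvm_arch_create_memslot()/kvm_arch_free_memslot() implementations;
common code calls these hooks from __kvm_set_memory_region() and
kvm_free_physmem_slot() respectively. Since the x86 hook needs
largepages_enabled, a kvm_largepages_enabled() accessor is also added
for arch code.
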
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2689ee5..e35b3a8 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -459,6 +459,9 @@
unsigned long boot_gp;
};
+struct kvm_arch_memory_slot {
+};
+
struct kvm_arch {
spinlock_t dirty_log_lock;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 8ca7261..d8ddbba 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1571,6 +1571,16 @@
return VM_FAULT_SIGBUS;
}
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1843d5d..52eb9c1 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -213,6 +213,9 @@
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80
+struct kvm_arch_memory_slot {
+};
+
struct kvm_arch {
#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long hpt_virt;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e21d15..00d7e34 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -281,6 +281,16 @@
return -EINVAL;
}
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e630426..7343872 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,9 @@
u32 remote_tlb_flush;
};
+struct kvm_arch_memory_slot {
+};
+
struct kvm_arch{
struct sca_block *sca;
debug_info_t *dbf;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index cf3c0a9..17ad69d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -814,6 +814,16 @@
return VM_FAULT_SIGBUS;
}
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c24125c..74c9edf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -483,6 +483,15 @@
} osvw;
};
+struct kvm_lpage_info {
+ unsigned long rmap_pde;
+ int write_count;
+};
+
+struct kvm_arch_memory_slot {
+ struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 37e7f10..ff053ca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -689,7 +689,7 @@
unsigned long idx;
idx = gfn_to_index(gfn, slot->base_gfn, level);
- return &slot->lpage_info[level - 2][idx];
+ return &slot->arch.lpage_info[level - 2][idx];
}
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3df0b7a..ca74c1d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6239,6 +6239,65 @@
put_page(kvm->arch.ept_identity_pagetable);
}
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+ int i;
+
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+ if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
+ vfree(free->arch.lpage_info[i]);
+ free->arch.lpage_info[i] = NULL;
+ }
+ }
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ int i;
+
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+ unsigned long ugfn;
+ int lpages;
+ int level = i + 2;
+
+ lpages = gfn_to_index(slot->base_gfn + npages - 1,
+ slot->base_gfn, level) + 1;
+
+ slot->arch.lpage_info[i] =
+ vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
+ if (!slot->arch.lpage_info[i])
+ goto out_free;
+
+ if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+ slot->arch.lpage_info[i][0].write_count = 1;
+ if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+ slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+ ugfn = slot->userspace_addr >> PAGE_SHIFT;
+ /*
+ * If the gfn and userspace address are not aligned wrt each
+ * other, or if explicitly asked to, disable large page
+ * support for this slot
+ */
+ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+ !kvm_largepages_enabled()) {
+ unsigned long j;
+
+ for (j = 0; j < lpages; ++j)
+ slot->arch.lpage_info[i][j].write_count = 1;
+ }
+ }
+
+ return 0;
+
+out_free:
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+ vfree(slot->arch.lpage_info[i]);
+ slot->arch.lpage_info[i] = NULL;
+ }
+ return -ENOMEM;
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7a08496..355e445 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -171,11 +171,6 @@
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
-struct kvm_lpage_info {
- unsigned long rmap_pde;
- int write_count;
-};
-
struct kvm_memory_slot {
gfn_t base_gfn;
unsigned long npages;
@@ -184,7 +179,7 @@
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_head;
unsigned long nr_dirty_pages;
- struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+ struct kvm_arch_memory_slot arch;
unsigned long userspace_addr;
int user_alloc;
int id;
@@ -376,6 +371,9 @@
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
@@ -385,6 +383,7 @@
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
+bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a30447c..8340e0e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -535,21 +535,13 @@
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
- int i;
-
if (!dont || free->rmap != dont->rmap)
vfree(free->rmap);
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
kvm_destroy_dirty_bitmap(free);
-
- for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
- if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
- vfree(free->lpage_info[i]);
- free->lpage_info[i] = NULL;
- }
- }
+ kvm_arch_free_memslot(free, dont);
free->npages = 0;
free->rmap = NULL;
@@ -685,53 +677,6 @@
slots->generation++;
}
-#ifndef CONFIG_S390
-static int create_lpage_info(struct kvm_memory_slot *slot, unsigned long npages)
-{
- int i;
-
- for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
- unsigned long ugfn;
- int lpages;
- int level = i + 2;
-
- lpages = gfn_to_index(slot->base_gfn + npages - 1,
- slot->base_gfn, level) + 1;
-
- slot->lpage_info[i] = vzalloc(lpages * sizeof(*slot->lpage_info[i]));
- if (!slot->lpage_info[i])
- goto out_free;
-
- if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
- slot->lpage_info[i][0].write_count = 1;
- if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
- slot->lpage_info[i][lpages - 1].write_count = 1;
- ugfn = slot->userspace_addr >> PAGE_SHIFT;
- /*
- * If the gfn and userspace address are not aligned wrt each
- * other, or if explicitly asked to, disable large page
- * support for this slot
- */
- if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
- !largepages_enabled) {
- unsigned long j;
-
- for (j = 0; j < lpages; ++j)
- slot->lpage_info[i][j].write_count = 1;
- }
- }
-
- return 0;
-
-out_free:
- for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
- vfree(slot->lpage_info[i]);
- slot->lpage_info[i] = NULL;
- }
- return -ENOMEM;
-}
-#endif /* not defined CONFIG_S390 */
-
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -819,10 +764,9 @@
new.rmap = vzalloc(npages * sizeof(*new.rmap));
if (!new.rmap)
goto out_free;
-
- if (create_lpage_info(&new, npages))
- goto out_free;
#endif /* not defined CONFIG_S390 */
+ if (kvm_arch_create_memslot(&new, npages))
+ goto out_free;
}
/* Allocate page dirty bitmap if needed */
@@ -880,8 +824,7 @@
if (!npages) {
new.rmap = NULL;
new.dirty_bitmap = NULL;
- for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
- new.lpage_info[i] = NULL;
+ memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
@@ -968,6 +911,11 @@
return r;
}
+bool kvm_largepages_enabled(void)
+{
+ return largepages_enabled;
+}
+
void kvm_disable_largepages(void)
{
largepages_enabled = false;