KVM: Convert kvm->requests_lock to raw_spinlock_t
The code relies on kvm->requests_lock inhibiting preemption: smp_processor_id() is sampled and the IPI cpumask is built and sent while the lock is held, so the caller must not be preempted and migrated in between. A plain spinlock_t does not guarantee that on all configurations (under PREEMPT_RT it becomes a sleeping lock), so convert the lock to a raw_spinlock_t, which always keeps preemption disabled.
Noted by Jan Kiszka.
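
For illustration only (this snippet is not part of the patch; example_lock and
example_kick_vcpu are made-up names), the shape of the critical section being
protected is roughly:

/*
 * Sketch of the pattern requests_lock protects: the cpu id read under
 * the lock feeds the IPI decision, so the task must not migrate before
 * the IPI goes out.  raw_spin_lock() keeps preemption disabled even on
 * PREEMPT_RT, where spin_lock() would not.
 */
#include <linux/smp.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_kick_vcpu(int target_cpu, void (*func)(void *info))
{
	int me;

	raw_spin_lock(&example_lock);	/* preemption disabled from here */
	me = smp_processor_id();	/* stable while the lock is held */
	if (target_cpu != me)		/* never IPI the local cpu */
		smp_call_function_single(target_cpu, func, NULL, 1);
	raw_spin_unlock(&example_lock);
}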
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3145b28..a3fd0f9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -161,7 +161,7 @@
 
 struct kvm {
 	spinlock_t mmu_lock;
-	spinlock_t requests_lock;
+	raw_spinlock_t requests_lock;
 	struct mutex slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots *memslots;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0a360c2..548f925 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -138,7 +138,7 @@
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	spin_lock(&kvm->requests_lock);
+	raw_spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(req, &vcpu->requests))
@@ -153,7 +153,7 @@
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
-	spin_unlock(&kvm->requests_lock);
+	raw_spin_unlock(&kvm->requests_lock);
 	free_cpumask_var(cpus);
 	return called;
 }
@@ -409,7 +409,7 @@
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
-	spin_lock_init(&kvm->requests_lock);
+	raw_spin_lock_init(&kvm->requests_lock);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);