From: Junaid Shahid <junaids@google.com>
Date: Thu, 3 Jan 2019 17:14:28 -0800
Subject: kvm: Convert kvm_lock to a mutex

commit 0d9ce162cf46c99628cc5da9510b959c7976735b upstream

It doesn't seem as if there is any particular need for kvm_lock to be a
spinlock, so convert the lock to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
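
Note: the point of the conversion is that the kvm_lock critical
sections may now sleep. A minimal sketch of the now-legal pattern
(illustrative only, not code added by this patch; the identifiers are
the ones used in the hunks below):

	struct kvm *kvm;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* potentially long per-VM work ... */
		cond_resched();	/* legal under a mutex, a bug under a spinlock */
	}
	mutex_unlock(&kvm_lock);

Relatedly, the kvmclock_cpufreq_notifier() hunk switches
smp_processor_id() to raw_smp_processor_id(): once kvm_lock is a mutex
the critical section is preemptible, and the checked variant would
trigger a warning there under CONFIG_DEBUG_PREEMPT.
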
 Documentation/virtual/kvm/locking.txt |  4 +---
 arch/s390/kvm/kvm-s390.c              |  4 ++--
 arch/x86/kvm/mmu.c                    |  4 ++--
 arch/x86/kvm/x86.c                    | 14 ++++++-------
 include/linux/kvm_host.h              |  2 +-
 virt/kvm/kvm_main.c                   | 30 +++++++++++++--------------
 6 files changed, 28 insertions(+), 30 deletions(-)

Index: linux/Documentation/virtual/kvm/locking.txt
===================================================================
--- linux.orig/Documentation/virtual/kvm/locking.txt
+++ linux/Documentation/virtual/kvm/locking.txt
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are a
 
 On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
-
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
@@ -169,7 +167,7 @@ which time it will be set using the Dirt
 ------------
 
 Name:		kvm_lock
-Type:		spinlock_t
+Type:		mutex
 Arch:		any
 Protects:	- vm_list

Index: linux/arch/s390/kvm/kvm-s390.c
===================================================================
--- linux.orig/arch/s390/kvm/kvm-s390.c
+++ linux/arch/s390/kvm/kvm-s390.c
@@ -2110,13 +2110,13 @@ int kvm_arch_init_vm(struct kvm *kvm, un
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 	if (!kvm->arch.sca)
 		goto out_err;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	sca_offset += 16;
 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
 		sca_offset = 0;
 	kvm->arch.sca = (struct bsca_block *)
 			((char *) kvm->arch.sca + sca_offset);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	sprintf(debug_name, "kvm-%u", current->pid);

Index: linux/arch/x86/kvm/mmu.c
===================================================================
--- linux.orig/arch/x86/kvm/mmu.c
+++ linux/arch/x86/kvm/mmu.c
@@ -5819,7 +5819,7 @@ mmu_shrink_scan(struct shrinker *shrink,
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx;
@@ -5869,7 +5869,7 @@ unlock:
 		break;
 	}
 
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return freed;
 }

Index: linux/arch/x86/kvm/x86.c
===================================================================
--- linux.orig/arch/x86/kvm/x86.c
+++ linux/arch/x86/kvm/x86.c
@@ -6529,7 +6529,7 @@ static void kvm_hyperv_tsc_notifier(void
 	struct kvm_vcpu *vcpu;
 	int cpu;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_make_mclock_inprogress_request(kvm);
 
@@ -6555,7 +6555,7 @@ static void kvm_hyperv_tsc_notifier(void
 
 		spin_unlock(&ka->pvclock_gtod_sync_lock);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 #endif
 
@@ -6613,17 +6613,17 @@ static int kvmclock_cpufreq_notifier(str
 
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-			if (vcpu->cpu != smp_processor_id())
+			if (vcpu->cpu != raw_smp_processor_id())
 				send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	if (freq->old < freq->new && send_ipi) {
 		/*
@@ -6749,12 +6749,12 @@ static void pvclock_gtod_update_fn(struc
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

Index: linux/include/linux/kvm_host.h
===================================================================
--- linux.orig/include/linux/kvm_host.h
+++ linux/include/linux/kvm_host.h
@@ -141,7 +141,7 @@ static inline bool is_error_page(struct
 
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {

Index: linux/virt/kvm/kvm_main.c
===================================================================
--- linux.orig/virt/kvm/kvm_main.c
+++ linux/virt/kvm/kvm_main.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -685,9 +685,9 @@ static struct kvm *kvm_create_vm(unsigne
 	if (r)
 		goto out_err;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	preempt_notifier_inc();
 
@@ -733,9 +733,9 @@ static void kvm_destroy_vm(struct kvm *k
 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
 	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -3831,13 +3831,13 @@ static int vm_stat_get(void *_offset, u6
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3850,12 +3850,12 @@ static int vm_stat_clear(void *_offset,
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -3870,13 +3870,13 @@ static int vcpu_stat_get(void *_offset,
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3889,12 +3889,12 @@ static int vcpu_stat_clear(void *_offset
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -3915,7 +3915,7 @@ static void kvm_uevent_notify_change(uns
 	if (!kvm_dev.this_device || !kvm)
 		return;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	if (type == KVM_EVENT_CREATE_VM) {
 		kvm_createvm_count++;
 		kvm_active_vms++;
@@ -3924,7 +3924,7 @@ static void kvm_uevent_notify_change(uns
 	}
 	created = kvm_createvm_count;
 	active = kvm_active_vms;
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	env = kzalloc(sizeof(*env), GFP_KERNEL);
 	if (!env)