Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Marcelo Tosatti.

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: lock slots_lock around device assignment
  KVM: VMX: Fix kvm_set_shared_msr() called in preemptible context
  KVM: unmap pages from the iommu when slots are removed
  KVM: PMU emulation: GLOBAL_CTRL MSR should be enabled on reset
commit 9e01297ee1

arch/x86/kvm/pmu.c
@@ -459,17 +459,17 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
 
 	if (pmu->version == 1) {
-		pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
-		return;
+		pmu->nr_arch_fixed_counters = 0;
+	} else {
+		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+				X86_PMC_MAX_FIXED);
+		pmu->counter_bitmask[KVM_PMC_FIXED] =
+			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
 	}
 
-	pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
-			X86_PMC_MAX_FIXED);
-	pmu->counter_bitmask[KVM_PMC_FIXED] =
-		((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
-	pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
-		| (((1ull << pmu->nr_arch_fixed_counters) - 1)
-			<< X86_PMC_IDX_FIXED));
+	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+		(((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+	pmu->global_ctrl_mask = ~pmu->global_ctrl;
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
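
To make the new reset value concrete: a small worked example, written as a standalone userspace program rather than taken from KVM, using illustrative counter counts (4 general-purpose, 3 fixed) and X86_PMC_IDX_FIXED taken as its x86 value of 32.

/* Worked example only; nr_gp and nr_fixed are made-up CPUID-derived values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int nr_gp = 4, nr_fixed = 3;		/* illustrative counter counts */
	uint64_t global_ctrl;

	/* Same formula as the new kvm_pmu_cpuid_update() code above,
	 * with X86_PMC_IDX_FIXED == 32. */
	global_ctrl = ((1 << nr_gp) - 1) |
		      (((1ull << nr_fixed) - 1) << 32);

	printf("global_ctrl      = %#llx\n", (unsigned long long)global_ctrl);
	printf("global_ctrl_mask = %#llx\n", (unsigned long long)~global_ctrl);
	return 0;
}

This prints 0x70000000f and 0xfffffff8fffffff0: every general-purpose and fixed counter is enabled in the emulated IA32_PERF_GLOBAL_CTRL at reset, and global_ctrl_mask is simply the complement, i.e. the reserved bits that a guest write must leave clear.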

arch/x86/kvm/vmx.c
@@ -2210,9 +2210,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
-			if (msr - vmx->guest_msrs < vmx->save_nmsrs)
+			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+				preempt_disable();
 				kvm_set_shared_msr(msr->index, msr->data,
 						   msr->mask);
+				preempt_enable();
+			}
 			break;
 		}
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
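
The preempt_disable()/preempt_enable() bracketing above exists because kvm_set_shared_msr() updates per-CPU state, which is only safe if the task cannot be preempted and migrated to another CPU in the middle of the update. A minimal sketch of that general pattern, with hypothetical names (cached_msr_value, update_cached_msr) standing in for KVM's real shared-MSR machinery:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Hypothetical per-CPU cache, not the actual KVM shared-MSR structures. */
static DEFINE_PER_CPU(u64, cached_msr_value);

static void update_cached_msr(u64 data)
{
	preempt_disable();			/* no migration from here ... */
	__this_cpu_write(cached_msr_value, data);
	preempt_enable();			/* ... to here */
}

Callers that already run with preemption disabled would not need the extra pair; vmx_set_msr() is reachable from a preemptible ioctl path, hence the explicit bracketing here.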

include/linux/kvm_host.h
@@ -596,6 +596,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
 #ifdef CONFIG_IOMMU_API
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
 int kvm_assign_device(struct kvm *kvm,
@@ -609,6 +610,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 }
 
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+					 struct kvm_memory_slot *slot)
+{
+}
+
 static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;

virt/kvm/iommu.c
@@ -240,9 +240,13 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 		return -ENODEV;
 	}
 
+	mutex_lock(&kvm->slots_lock);
+
 	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
-	if (!kvm->arch.iommu_domain)
-		return -ENOMEM;
+	if (!kvm->arch.iommu_domain) {
+		r = -ENOMEM;
+		goto out_unlock;
+	}
 
 	if (!allow_unsafe_assigned_interrupts &&
 	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
@@ -253,17 +257,16 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 		       " module option.\n", __func__);
 		iommu_domain_free(kvm->arch.iommu_domain);
 		kvm->arch.iommu_domain = NULL;
-		return -EPERM;
+		r = -EPERM;
+		goto out_unlock;
 	}
 
 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
-		goto out_unmap;
+		kvm_iommu_unmap_memslots(kvm);
 
-	return 0;
-
-out_unmap:
-	kvm_iommu_unmap_memslots(kvm);
+out_unlock:
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
@@ -310,6 +313,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 	}
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int idx;
@@ -320,7 +328,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+		kvm_iommu_unmap_pages(kvm, memslot);
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -335,7 +343,11 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 	if (!domain)
 		return 0;
 
+	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
+	kvm->arch.iommu_domain = NULL;
+	mutex_unlock(&kvm->slots_lock);
+
 	iommu_domain_free(domain);
 	return 0;
 }
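
Taken together, the iommu.c hunks put kvm->slots_lock around both device-assignment setup and teardown and funnel every failure through a single unlock site. A compile-oriented sketch of that shape, with made-up helpers (example_alloc, example_map, example_unmap) standing in for the real domain-allocation and memslot-mapping steps:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);		/* stand-in for kvm->slots_lock */

static int example_alloc(void) { return 0; }	/* e.g. the iommu_domain_alloc() step */
static int example_map(void)   { return 0; }	/* e.g. kvm_iommu_map_memslots()      */
static void example_unmap(void) { }		/* e.g. kvm_iommu_unmap_memslots()    */

static int example_setup(void)
{
	int r;

	mutex_lock(&example_lock);

	r = example_alloc();
	if (r)
		goto out_unlock;		/* nothing to undo yet */

	r = example_map();
	if (r)
		example_unmap();		/* undo partial work on failure */

out_unlock:
	mutex_unlock(&example_lock);		/* one unlock on every path */
	return r;
}

Holding the same lock in the teardown path (as kvm_iommu_unmap_guest() now does) keeps slot updates and the IOMMU domain's lifetime from racing with assignment and deassignment.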

virt/kvm/kvm_main.c
@@ -808,12 +808,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
+	/* map/unmap the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
-	}
+	} else
+		kvm_iommu_unmap_pages(kvm, &old);
 
 	r = -ENOMEM;
 	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),