Merge tag 'kvm-x86-no_assignment-6.17' of https://github.com/kvm-x86/linux into HEAD

KVM VFIO device assignment cleanups for 6.17

Kill off kvm_arch_{start,end}_assignment() and x86's associated tracking now
that KVM no longer uses assigned_device_count as a bad heuristic for "VM has
an irqbypass producer" or for "VM has access to host MMIO".
Paolo Bonzini 2025-07-28 11:08:56 -04:00
commit 9de13951d5
5 changed files with 1 addition and 49 deletions
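
For context before the per-file diffs: the helpers being removed implemented nothing more than a per-VM reference count, bumped whenever a VFIO file or irqbypass producer was attached and queried elsewhere as a boolean "this VM has an assigned device" signal. The following is a minimal, self-contained sketch of that pattern, with C11 atomics standing in for the kernel's atomic_t; vm_may_use_posted_irqs() is a hypothetical consumer named only to illustrate the heuristic, not a function in the tree.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct kvm_arch {
	/* Stand-in for the "atomic_t assigned_device_count" field this
	 * merge removes from x86's struct kvm_arch. */
	atomic_int assigned_device_count;
};

struct kvm {
	struct kvm_arch arch;
};

/* Bumped when a VFIO file or irqbypass producer was attached... */
static void kvm_arch_start_assignment(struct kvm *kvm)
{
	atomic_fetch_add(&kvm->arch.assigned_device_count, 1);
}

/* ...and dropped again when it was detached. */
static void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_fetch_sub(&kvm->arch.assigned_device_count, 1);
}

/* The yes/no signal that other code queried. */
static bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_load(&kvm->arch.assigned_device_count) != 0;
}

/*
 * Hypothetical consumer: "some device is assigned" was treated as a
 * proxy for "an irqbypass producer exists" / "host MMIO is reachable",
 * which is exactly the bad heuristic the merge message calls out.
 */
static bool vm_may_use_posted_irqs(struct kvm *kvm)
{
	return kvm_arch_has_assigned_device(kvm);
}

int main(void)
{
	struct kvm vm = { .arch = { .assigned_device_count = 0 } };

	kvm_arch_start_assignment(&vm);	/* e.g. VFIO file added */
	printf("may use posted IRQs: %d\n", vm_may_use_posted_irqs(&vm));

	kvm_arch_end_assignment(&vm);	/* e.g. VFIO file removed */
	printf("may use posted IRQs: %d\n", vm_may_use_posted_irqs(&vm));
	return 0;
}

In the tree itself these were atomic_inc()/atomic_dec()/raw_atomic_read() on kvm->arch.assigned_device_count, as the x86.c hunk below shows. With irqbypass producers now tracked directly via kvm->arch.nr_possible_bypass_irqs (visible in the irq.c hunks) and "access to host MMIO" no longer inferred from device assignment, the count has no remaining readers, so the helpers, the field, and the __KVM_HAVE_ARCH_ASSIGNED_DEVICE plumbing can all be dropped.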

arch/x86/include/asm/kvm_host.h

@@ -1387,8 +1387,6 @@ struct kvm_arch {
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
 	atomic_t noncoherent_dma_count;
-#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-	atomic_t assigned_device_count;
 
 	unsigned long nr_possible_bypass_irqs;
 
 #ifdef CONFIG_KVM_IOAPIC

arch/x86/kvm/irq.c

@@ -565,8 +565,6 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 	struct kvm *kvm = irqfd->kvm;
 	int ret = 0;
 
-	kvm_arch_start_assignment(irqfd->kvm);
-
 	spin_lock_irq(&kvm->irqfds.lock);
 	irqfd->producer = prod;
@@ -575,10 +573,8 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 
 	if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
 		ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
-		if (ret) {
+		if (ret)
 			kvm->arch.nr_possible_bypass_irqs--;
-			kvm_arch_end_assignment(irqfd->kvm);
-		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -614,9 +610,6 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
 
 	kvm->arch.nr_possible_bypass_irqs--;
 	spin_unlock_irq(&kvm->irqfds.lock);
-
-	kvm_arch_end_assignment(irqfd->kvm);
 }
 
 void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,

arch/x86/kvm/x86.c

@@ -13444,24 +13444,6 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 	return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
 }
 
-void kvm_arch_start_assignment(struct kvm *kvm)
-{
-	atomic_inc(&kvm->arch.assigned_device_count);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
-
-void kvm_arch_end_assignment(struct kvm *kvm)
-{
-	atomic_dec(&kvm->arch.assigned_device_count);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
-
-bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
-{
-	return raw_atomic_read(&kvm->arch.assigned_device_count);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
-
 static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
 {
 	/*

include/linux/kvm_host.h

@@ -1690,24 +1690,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 	return false;
 }
 #endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
-{
-}
-
-static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
-{
-	return false;
-}
-#endif
 
 static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
 {

virt/kvm/vfio.c

@@ -175,7 +175,6 @@ static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
 	kvf->file = get_file(filp);
 	list_add_tail(&kvf->node, &kv->file_list);
 
-	kvm_arch_start_assignment(dev->kvm);
 	kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
 	kvm_vfio_update_coherency(dev);
@@ -205,7 +204,6 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
 			continue;
 
 		list_del(&kvf->node);
-		kvm_arch_end_assignment(dev->kvm);
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 		kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
 #endif
@@ -336,7 +334,6 @@ static void kvm_vfio_release(struct kvm_device *dev)
 		fput(kvf->file);
 		list_del(&kvf->node);
 		kfree(kvf);
-		kvm_arch_end_assignment(dev->kvm);
 	}
 
 	kvm_vfio_update_coherency(dev);