RISC-V: KVM: Fix kvm_riscv_vcpu_timer_pending() for Sstc
kvm_riscv_vcpu_timer_pending() checks the per-VCPU next_cycles value
and the per-VCPU software-injected VS timer interrupt. This function
returns an incorrect value when Sstc is available because the per-VCPU
next_cycles value is only updated by kvm_riscv_vcpu_timer_save(), which
is called from kvm_arch_vcpu_put(). As a result, when Sstc is available
the VCPU does not block properly upon WFI traps.

To fix the above issue, we introduce kvm_riscv_vcpu_timer_sync(), which
updates the per-VCPU next_cycles value upon every VM exit instead of
leaving this to kvm_riscv_vcpu_timer_save().
Fixes: 8f5cb44b1b ("RISC-V: KVM: Support sstc extension")
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
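For context on the check described above, here is a rough sketch of kvm_riscv_vcpu_timer_pending() as it stands in arch/riscv/kvm/vcpu_timer.c at this point. The helper names (kvm_riscv_delta_cycles2ns(), kvm_riscv_vcpu_has_interrupts()) follow the kernel source as best as can be reconstructed here, so treat the exact signatures as approximate; the relevant point is that the decision rests on the cached t->next_cycles, which with Sstc was previously refreshed only in kvm_riscv_vcpu_timer_save():

/*
 * Approximate sketch, not verbatim kernel code: with Sstc the guest
 * programs vstimecmp directly, so t->next_cycles is stale here unless
 * it is re-read from the CSR on every VM exit, which is what the new
 * kvm_riscv_vcpu_timer_sync() below does.
 */
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	/*
	 * Pending if the programmed deadline has already passed or a
	 * VS timer interrupt was injected in software.
	 */
	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;

	return false;
}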
parent 5c20a3a9df
commit cea8896bd9
3 changed files with 24 additions and 7 deletions
arch/riscv/include/asm/kvm_vcpu_timer.h

@@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
arch/riscv/kvm/vcpu.c

@@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
 		}
 	}
+
+	/* Sync-up timer CSRs */
+	kvm_riscv_vcpu_timer_sync(vcpu);
 }
 
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
arch/riscv/kvm/vcpu_timer.c

@@ -320,6 +320,21 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_timer_unblocking(vcpu);
 }
 
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+}
+
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
@@ -327,13 +342,11 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
 	if (!t->sstc_enabled)
 		return;
 
-	t = &vcpu->arch.timer;
-#if defined(CONFIG_32BIT)
-	t->next_cycles = csr_read(CSR_VSTIMECMP);
-	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
-#else
-	t->next_cycles = csr_read(CSR_VSTIMECMP);
-#endif
+	/*
+	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+	 * upon every VM exit so no need to save here.
+	 */
+
 	/* timer should be enabled for the remaining operations */
 	if (unlikely(!t->init_done))
 		return;
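To make "upon every VM exit" concrete: kvm_riscv_vcpu_sync_interrupts() runs in the vcpu run loop each time the guest traps back to the host, so the added kvm_riscv_vcpu_timer_sync() call refreshes the cached next_cycles before any later blocking decision. Below is a heavily abridged, illustrative sketch of that path in arch/riscv/kvm/vcpu.c; the real kvm_arch_vcpu_ioctl_run() does far more, and this is not a verbatim copy:

/* Abridged illustration of the vcpu run loop; most details elided. */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret = 1;

	while (ret > 0) {
		/* ... flush pending interrupts into HVIP, enter the guest ... */
		kvm_riscv_vcpu_enter_exit(vcpu);

		/*
		 * Back in the host after a VM exit: sync interrupt state,
		 * which with this patch also calls kvm_riscv_vcpu_timer_sync()
		 * so that next_cycles matches whatever the guest last wrote
		 * to vstimecmp before any WFI/blocking decision is made.
		 */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/* ... handle the exit reason, possibly blocking on WFI ... */
	}

	return ret;
}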