Commit 21e60759 authored by Paolo Bonzini

Merge tag 'kvm-riscv-fixes-6.1-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 6.1, take #1

- Fix compilation without RISCV_ISA_ZICBOM
- Fix kvm_riscv_vcpu_timer_pending() for Sstc
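
For readers skimming the diff below: with Sstc, the guest's next timer deadline lives in the vstimecmp CSR while the vCPU is running, so a pending-timer check made after a VM exit is only meaningful if that CSR has first been read back into the vCPU's software timer state; the new kvm_riscv_vcpu_timer_sync() does exactly that. The following standalone sketch is not the kernel code, just a minimal model (made-up names and numbers) of the "snapshot the compare value on exit, then test it against the current time" idea behind kvm_riscv_vcpu_timer_pending().

/*
 * Illustrative only -- not the KVM RISC-V implementation.  A hypothetical,
 * standalone model of "read the guest's timer compare value back on VM exit
 * so a later pending check sees the value the guest actually programmed".
 * All names and numbers below are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_timer_model {
	uint64_t next_cycles;	/* software copy of the guest's timecmp */
};

/* Stand-in for the vstimecmp CSR the guest wrote while it was running. */
static uint64_t hw_timecmp = UINT64_MAX;

/* Run on every "VM exit" in this model (cf. kvm_riscv_vcpu_timer_sync()). */
static void timer_sync(struct vcpu_timer_model *t)
{
	t->next_cycles = hw_timecmp;
}

/* Pending check against the synced copy (cf. kvm_riscv_vcpu_timer_pending()). */
static bool timer_pending(const struct vcpu_timer_model *t, uint64_t now)
{
	return t->next_cycles <= now;
}

int main(void)
{
	struct vcpu_timer_model t = { .next_cycles = UINT64_MAX };

	/* The guest programs an earlier deadline via "vstimecmp". */
	hw_timecmp = 1000;

	/* Without a sync, the stale copy still says "not pending" at time 1500. */
	printf("stale copy : pending=%d\n", timer_pending(&t, 1500));

	timer_sync(&t);	/* what the fix adds on every VM exit */
	printf("synced copy: pending=%d\n", timer_pending(&t, 1500));
	return 0;
}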
arch/riscv/include/asm/cacheflush.h
@@ -42,16 +42,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
-/*
- * The T-Head CMO errata internally probe the CBOM block size, but otherwise
- * don't depend on Zicbom.
- */
 extern unsigned int riscv_cbom_block_size;
-#ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
-#else
-static inline void riscv_init_cbom_blocksize(void) { }
-#endif
 
 #ifdef CONFIG_RISCV_DMA_NONCOHERENT
 void riscv_noncoherent_supported(void);
arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
 bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 
arch/riscv/kvm/vcpu.c
@@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
 			clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
 		}
 	}
+
+	/* Sync-up timer CSRs */
+	kvm_riscv_vcpu_timer_sync(vcpu);
 }
 
 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
arch/riscv/kvm/vcpu_timer.c
@@ -320,20 +320,33 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_timer_unblocking(vcpu);
 }
 
-void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 
 	if (!t->sstc_enabled)
 		return;
 
-	t = &vcpu->arch.timer;
 #if defined(CONFIG_32BIT)
 	t->next_cycles = csr_read(CSR_VSTIMECMP);
 	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
 #else
 	t->next_cycles = csr_read(CSR_VSTIMECMP);
 #endif
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	/*
+	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
+	 * upon every VM exit so no need to save here.
+	 */
+
 	/* timer should be enabled for the remaining operations */
 	if (unlikely(!t->init_done))
 		return;
arch/riscv/mm/cacheflush.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
@@ -86,3 +87,40 @@ void flush_icache_pte(pte_t pte)
 		flush_icache_all();
 }
 #endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+	struct device_node *node;
+	unsigned long cbom_hartid;
+	u32 val, probed_block_size;
+	int ret;
+
+	probed_block_size = 0;
+	for_each_of_cpu_node(node) {
+		unsigned long hartid;
+
+		ret = riscv_of_processor_hartid(node, &hartid);
+		if (ret)
+			continue;
+
+		/* set block-size for cbom extension if available */
+		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+		if (ret)
+			continue;
+
+		if (!probed_block_size) {
+			probed_block_size = val;
+			cbom_hartid = hartid;
+		} else {
+			if (probed_block_size != val)
+				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+					cbom_hartid, hartid);
+		}
+	}
+
+	if (probed_block_size)
+		riscv_cbom_block_size = probed_block_size;
+}
arch/riscv/mm/dma-noncoherent.c
@@ -8,13 +8,8 @@
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
 #include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <asm/cacheflush.h>
 
-unsigned int riscv_cbom_block_size;
-EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
-
 static bool noncoherent_supported;
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
@@ -77,42 +72,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	dev->dma_coherent = coherent;
 }
 
-#ifdef CONFIG_RISCV_ISA_ZICBOM
-void riscv_init_cbom_blocksize(void)
-{
-	struct device_node *node;
-	unsigned long cbom_hartid;
-	u32 val, probed_block_size;
-	int ret;
-
-	probed_block_size = 0;
-	for_each_of_cpu_node(node) {
-		unsigned long hartid;
-
-		ret = riscv_of_processor_hartid(node, &hartid);
-		if (ret)
-			continue;
-
-		/* set block-size for cbom extension if available */
-		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
-		if (ret)
-			continue;
-
-		if (!probed_block_size) {
-			probed_block_size = val;
-			cbom_hartid = hartid;
-		} else {
-			if (probed_block_size != val)
-				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
-					cbom_hartid, hartid);
-		}
-	}
-
-	if (probed_block_size)
-		riscv_cbom_block_size = probed_block_size;
-}
-#endif
-
 void riscv_noncoherent_supported(void)
 {
 	WARN(!riscv_cbom_block_size,
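
As a side note on the relocated riscv_init_cbom_blocksize(): the probe keeps the first riscv,cbom-block-size value it finds and only warns when another hart reports a different one. The standalone sketch below (made-up hart data, no devicetree access) models that first-value-wins / warn-on-mismatch behaviour; it is not kernel code.

/*
 * Illustrative only -- a standalone model of the "first probed value wins,
 * warn on mismatch" pattern riscv_init_cbom_blocksize() applies to the
 * per-hart riscv,cbom-block-size devicetree property.  The hart table below
 * is made up; the kernel reads the real values with of_property_read_u32().
 */
#include <stdio.h>

struct hart_info {
	unsigned long hartid;
	unsigned int cbom_block_size;	/* 0 here means "property absent" */
};

int main(void)
{
	const struct hart_info harts[] = {
		{ .hartid = 0, .cbom_block_size = 64 },
		{ .hartid = 1, .cbom_block_size = 0 },	/* no property: skipped */
		{ .hartid = 2, .cbom_block_size = 32 },	/* mismatch: warned about */
	};
	unsigned int probed_block_size = 0;
	unsigned long cbom_hartid = 0;

	for (size_t i = 0; i < sizeof(harts) / sizeof(harts[0]); i++) {
		if (!harts[i].cbom_block_size)
			continue;

		if (!probed_block_size) {
			probed_block_size = harts[i].cbom_block_size;
			cbom_hartid = harts[i].hartid;
		} else if (probed_block_size != harts[i].cbom_block_size) {
			fprintf(stderr,
				"cbom-block-size mismatched between harts %lu and %lu\n",
				cbom_hartid, harts[i].hartid);
		}
	}

	printf("riscv_cbom_block_size = %u\n", probed_block_size);
	return 0;
}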