Unverified commit 5d0a525a, authored by openeuler-ci-bot, committed by Gitee

!449 LoongArch: add kvm support

Merge Pull Request from: @Hongchen_Zhang 
 
Add emulation of CSR devices, interrupt controller devices, the MMU,
timers, etc. to KVM for the LoongArch architecture, providing KVM-based
acceleration for QEMU applications on 5000 servers and 5000 desktops
with LoongArch CPUs.

Feature details:
1) CPU
  - Supports up to 64 vCPUs and vCPU hot-plug
2) Memory
  - Backing memory supports normal huge pages and transparent huge pages
  - Supports memory ballooning
  - Supports memory hot-plug
3) Peripherals
  - Supports the standard PCI, SATA, SCSI, USB and virtio device interfaces
  - Supports multiple NICs and multiple disks
  - Supports device hot-plug
4) Boot
  - Supports UEFI BIOS boot
  - Supports boot order
  - Supports direct kernel boot
  - Supports TPM
5) Migration
  - Supports virtual machine snapshots
  - Supports virtual machine save and restore
  - Supports shared-storage migration
  - Supports incremental migration
  - Supports full-copy storage migration

Tested with the following steps:
1. Install virt-manager
2. Install libvirt
3. Install qemu
4. Download a LoongArch qcow2 image
5. Create a virtual machine in virt-manager using the qcow2 image
6. Start the virtual machine and verify that it runs normally
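As an extra sanity check before relying on step 6 (independent of virt-manager and libvirt), the in-kernel KVM side can be probed directly through /dev/kvm using only the generic, architecture-independent KVM API. A minimal sketch, assuming nothing beyond the kvm module from this series being loaded:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
                perror("open /dev/kvm");        /* module missing or no permission */
                return 1;
        }
        printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));
        printf("max vcpus: %d\n", ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS));

        int vm = ioctl(kvm, KVM_CREATE_VM, 0);  /* create an empty VM */
        printf("KVM_CREATE_VM: %s\n", vm >= 0 ? "ok" : "failed");

        if (vm >= 0)
                close(vm);
        close(kvm);
        return 0;
}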
 
Link: https://gitee.com/openeuler/kernel/pulls/449

Reviewed-by: Guo Dongtai <guodongtai@kylinos.cn> 
Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -2,5 +2,9 @@ obj-y += kernel/
obj-y += mm/
obj-y += vdso/
ifdef CONFIG_KVM
obj-y += kvm/
endif
# for cleaning
subdir- += boot
@@ -106,6 +106,7 @@ config LOONGARCH
select HAVE_SETUP_PER_CPU_AREA if NUMA
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
select HAVE_KVM
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
@@ -539,3 +540,4 @@ source "drivers/cpufreq/Kconfig"
endmenu
source "drivers/firmware/Kconfig"
source "arch/loongarch/kvm/Kconfig"
@@ -49,6 +49,11 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_LOONGSON3_ACPI_CPUFREQ=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_VHOST_NET=m
CONFIG_VHOST_SCSI=m
CONFIG_VHOST_VSOCK=m
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -26,4 +26,3 @@ generic-y += poll.h
generic-y += param.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += kvm_para.h
@@ -42,17 +42,18 @@ enum reg1i21_op {
};
enum reg2i12_op {
slti_op = 0x8, sltui_op, addiw_op, addid_op,
lu52id_op, cache_op = 0x18, xvldreplb_op = 0xca,
ldb_op = 0xa0, ldh_op, ldw_op, ldd_op, stb_op, sth_op,
stw_op, std_op, ldbu_op, ldhu_op, ldwu_op, preld_op,
flds_op, fsts_op, fldd_op, fstd_op, vld_op, vst_op, xvld_op,
xvst_op, ldlw_op = 0xb8, ldrw_op, ldld_op, ldrd_op, stlw_op,
strw_op, stld_op, strd_op, vldreplb_op = 0xc2,
};
enum reg2i14_op {
llw_op = 0x20, scw_op, lld_op, scd_op, ldptrw_op, stptrw_op,
ldptrd_op, stptrd_op,
};
enum reg2i16_op {
@@ -65,6 +66,49 @@ enum reg2i16_op {
bgeu_op = 0x1b,
};
enum reg3_op {
asrtled_op = 0x2, asrtgtd_op,
addw_op = 0x20, addd_op, subw_op, subd_op,
slt_op, sltu_op, maskeqz_op, masknez_op,
nor_op, and_op, or_op, xor_op, orn_op,
andn_op, sllw_op, srlw_op, sraw_op, slld_op,
srld_op, srad_op, rotrb_op, rotrh_op,
rotrw_op, rotrd_op, mulw_op, mulhw_op,
mulhwu_op, muld_op, mulhd_op, mulhdu_op,
mulwdw_op, mulwdwu_op, divw_op, modw_op,
divwu_op, modwu_op, divd_op, modd_op,
divdu_op, moddu_op, crcwbw_op,
crcwhw_op, crcwww_op, crcwdw_op, crccwbw_op,
crccwhw_op, crccwww_op, crccwdw_op, addu12iw_op,
addu12id_op,
adcb_op = 0x60, adch_op, adcw_op, adcd_op,
sbcb_op, sbch_op, sbcw_op, sbcd_op,
rcrb_op, rcrh_op, rcrw_op, rcrd_op,
ldxb_op = 0x7000, ldxh_op = 0x7008, ldxw_op = 0x7010, ldxd_op = 0x7018,
stxb_op = 0x7020, stxh_op = 0x7028, stxw_op = 0x7030, stxd_op = 0x7038,
ldxbu_op = 0x7040, ldxhu_op = 0x7048, ldxwu_op = 0x7050,
preldx_op = 0x7058, fldxs_op = 0x7060, fldxd_op = 0x7068,
fstxs_op = 0x7070, fstxd_op = 0x7078, vldx_op = 0x7080,
vstx_op = 0x7088, xvldx_op = 0x7090, xvstx_op = 0x7098,
amswapw_op = 0x70c0, amswapd_op, amaddw_op, amaddd_op, amandw_op,
amandd_op, amorw_op, amord_op, amxorw_op, amxord_op, ammaxw_op,
ammaxd_op, amminw_op, ammind_op, ammaxwu_op, ammaxdu_op,
amminwu_op, ammindu_op, amswap_dbw_op, amswap_dbd_op, amadd_dbw_op,
amadd_dbd_op, amand_dbw_op, amand_dbd_op, amor_dbw_op, amor_dbd_op,
amxor_dbw_op, amxor_dbd_op, ammax_dbw_op, ammax_dbd_op, ammin_dbw_op,
ammin_dbd_op, ammax_dbwu_op, ammax_dbdu_op, ammin_dbwu_op,
ammin_dbdu_op, fldgts_op = 0x70e8, fldgtd_op,
fldles_op, fldled_op, fstgts_op, fstgtd_op, fstles_op, fstled_op,
ldgtb_op, ldgth_op, ldgtw_op, ldgtd_op, ldleb_op, ldleh_op, ldlew_op,
ldled_op, stgtb_op, stgth_op, stgtw_op, stgtd_op, stleb_op, stleh_op,
stlew_op, stled_op,
};
enum reg2_op {
iocsrrdb_op = 0x19200, iocsrrdh_op, iocsrrdw_op, iocsrrdd_op,
iocsrwrb_op, iocsrwrh_op, iocsrwrw_op, iocsrwrd_op,
};
struct reg0i26_format {
unsigned int immediate_h : 10;
unsigned int immediate_l : 16;
@@ -84,6 +128,12 @@ struct reg1i21_format {
unsigned int opcode : 6;
};
struct reg2_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int opcode : 22;
};
struct reg2i12_format {
unsigned int rd : 5;
unsigned int rj : 5;
@@ -91,6 +141,18 @@ struct reg2i12_format {
unsigned int opcode : 10;
};
struct reg2i14_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int simmediate : 14;
unsigned int opcode : 8;
};
struct reg0i15_format {
unsigned int simmediate : 15;
unsigned int opcode : 17;
};
struct reg2i16_format {
unsigned int rd : 5;
unsigned int rj : 5;
@@ -98,13 +160,32 @@ struct reg2i16_format {
unsigned int opcode : 6;
};
struct reg3_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int rk : 5;
unsigned int opcode : 17;
};
struct reg2csr_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int csr : 14;
unsigned int opcode : 8;
};
union loongarch_instruction {
unsigned int word;
struct reg0i26_format reg0i26_format;
struct reg1i20_format reg1i20_format;
struct reg1i21_format reg1i21_format;
struct reg3_format reg3_format;
struct reg2_format reg2_format;
struct reg2i12_format reg2i12_format;
struct reg2i14_format reg2i14_format;
struct reg2i16_format reg2i16_format;
struct reg2csr_format reg2csr_format;
struct reg0i15_format reg0i15_format;
};
#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __LOONGARCH_KVM_HOST_H__
#define __LOONGARCH_KVM_HOST_H__
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <asm/inst.h>
/* Loongarch KVM register ids */
#define LOONGARCH_CSR_32(_R, _S) \
(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
#define LOONGARCH_CSR_64(_R, _S) \
(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
#define KVM_IOC_CSRID(id) LOONGARCH_CSR_64(id, 0)
#define KVM_GET_IOC_CSRIDX(id) ((id & KVM_CSR_IDX_MASK) >> 3)
#define LOONGSON_VIRT_REG_BASE 0x1f000000
#define KVM_MAX_VCPUS 256
#define KVM_USER_MEM_SLOTS 256
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1)
#define KVM_INVALID_ADDR 0xdeadbeef
#define KVM_HVA_ERR_BAD (-1UL)
#define KVM_HVA_ERR_RO_BAD (-2UL)
static inline bool kvm_is_error_hva(unsigned long addr)
{
return IS_ERR_VALUE(addr);
}
struct kvm_vm_stat {
ulong remote_tlb_flush;
u64 vm_ioctl_irq_line;
u64 ls7a_ioapic_update;
u64 ls7a_ioapic_set_irq;
u64 ioapic_reg_write;
u64 ioapic_reg_read;
u64 set_ls7a_ioapic;
u64 get_ls7a_ioapic;
u64 set_ls3a_ext_irq;
u64 get_ls3a_ext_irq;
u64 trigger_ls3a_ext_irq;
u64 pip_read_exits;
u64 pip_write_exits;
u64 ls7a_msi_irq;
};
struct kvm_vcpu_stat {
u64 excep_exits[EXCCODE_INT_START];
u64 idle_exits;
u64 signal_exits;
u64 int_exits;
u64 rdcsr_cpu_feature_exits;
u64 rdcsr_misc_func_exits;
u64 rdcsr_ipi_access_exits;
u64 cpucfg_exits;
u64 huge_dec_exits;
u64 huge_thp_exits;
u64 huge_adjust_exits;
u64 huge_set_exits;
u64 huge_merge_exits;
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_success_ns;
u64 halt_poll_fail_ns;
u64 halt_poll_invalid;
u64 halt_wakeup;
};
#define KVM_MEMSLOT_DISABLE_THP (1UL << 17)
struct kvm_arch_memory_slot {
unsigned int flags;
};
enum {
IOCSR_FEATURES,
IOCSR_VENDOR,
IOCSR_CPUNAME,
IOCSR_NODECNT,
IOCSR_MISC_FUNC,
IOCSR_MAX
};
struct kvm_context {
unsigned long gid_mask;
unsigned long gid_ver_mask;
unsigned long gid_fisrt_ver;
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
};
struct kvm_arch {
/* Guest physical mm */
struct mm_struct gpa_mm;
/* Mask of CPUs needing GPA ASID flush */
cpumask_t asid_flush_mask;
unsigned char online_vcpus;
unsigned char is_migrate;
s64 stablecounter_gftoffset;
u32 cpucfg_lasx;
struct ls7a_kvm_ioapic *v_ioapic;
struct ls3a_kvm_ipi *v_gipi;
struct ls3a_kvm_routerirq *v_routerirq;
struct ls3a_kvm_extirq *v_extirq;
spinlock_t iocsr_lock;
struct kvm_iocsr_entry iocsr[IOCSR_MAX];
struct kvm_cpucfg cpucfgs;
struct kvm_context __percpu *vmcs;
};
#define LOONGARCH_CSRS 0x100
#define CSR_UCWIN_BASE 0x100
#define CSR_UCWIN_SIZE 0x10
#define CSR_DMWIN_BASE 0x180
#define CSR_DMWIN_SIZE 0x4
#define CSR_PERF_BASE 0x200
#define CSR_PERF_SIZE 0x8
#define CSR_DEBUG_BASE 0x500
#define CSR_DEBUG_SIZE 0x3
#define CSR_ALL_SIZE 0x800
struct loongarch_csrs {
unsigned long csrs[CSR_ALL_SIZE];
};
/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
#define RESUME_GUEST 0
#define RESUME_GUEST_DR RESUME_FLAG_DR
#define RESUME_HOST RESUME_FLAG_HOST
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_WAIT, /* WAIT instruction */
EMULATE_PRIV_FAIL,
EMULATE_EXCEPT, /* A guest exception has been generated */
EMULATE_PV_HYPERCALL, /* HYPCALL instruction */
EMULATE_DEBUG, /* Emulate guest kernel debug */
EMULATE_DO_IOCSR, /* handle IOCSR request */
};
#define KVM_LARCH_FPU (0x1 << 0)
#define KVM_LARCH_LSX (0x1 << 1)
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_DATA_HWBP (0x1 << 3)
#define KVM_LARCH_INST_HWBP (0x1 << 4)
#define KVM_LARCH_HWBP (KVM_LARCH_DATA_HWBP | KVM_LARCH_INST_HWBP)
#define KVM_LARCH_RESET (0x1 << 5)
#define KVM_LARCH_PERF (0x1 << 6)
struct kvm_vcpu_arch {
unsigned long guest_eentry;
unsigned long host_eentry;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
/* Host registers preserved across guest mode execution */
unsigned long host_stack;
unsigned long host_gp;
unsigned long host_pgd;
unsigned long host_pgdhi;
unsigned long host_entryhi;
/* Host CSR registers used when handling exits from guest */
unsigned long badv;
unsigned long host_estat;
unsigned long badi;
unsigned long host_ecfg;
unsigned long host_percpu;
u32 is_hypcall;
/* GPRS */
unsigned long gprs[32];
unsigned long pc;
/* FPU State */
struct loongarch_fpu fpu FPU_ALIGN;
/* Which auxiliary state is loaded (KVM_LOONGARCH_AUX_*) */
unsigned int aux_inuse;
/* CSR State */
struct loongarch_csrs *csr;
/* GPR used as IO source/target */
u32 io_gpr;
struct hrtimer swtimer;
/* Count timer control KVM register */
u32 count_ctl;
/* Bitmask of exceptions that are pending */
unsigned long irq_pending;
/* Bitmask of pending exceptions to be cleared */
unsigned long irq_clear;
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
/* vcpu's vpid is different on each host cpu in an smp system */
u64 vpid[NR_CPUS];
/* Period of stable timer tick in ns */
u64 timer_period;
/* Frequency of stable timer in Hz */
u64 timer_mhz;
/* Stable bias from the raw time */
u64 timer_bias;
/* Dynamic nanosecond bias (multiple of timer_period) to avoid overflow */
s64 timer_dyn_bias;
/* Save ktime */
ktime_t stable_ktime_saved;
u64 core_ext_ioisr[4];
/* Last CPU the VCPU state was loaded on */
int last_sched_cpu;
/* Last CPU the VCPU actually executed guest code on */
int last_exec_cpu;
u8 fpu_enabled;
u8 lsx_enabled;
/* paravirt steal time */
struct {
u64 guest_addr;
u64 last_steal;
struct gfn_to_pfn_cache cache;
} st;
struct kvm_guest_debug_arch guest_debug;
/* save host pmu csr */
u64 perf_ctrl[4];
u64 perf_cntr[4];
};
static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
return csr->csrs[reg];
}
static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, \
unsigned long val)
{
csr->csrs[reg] = val;
}
/* Helpers */
static inline bool _kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
return cpu_has_fpu && arch->fpu_enabled;
}
static inline bool _kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
return cpu_has_lsx && arch->lsx_enabled;
}
bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu);
void _kvm_init_fault(void);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* MMU handling */
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
void kvm_flush_tlb_all(void);
void _kvm_destroy_mm(struct kvm *kvm);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
enum _kvm_fault_result {
KVM_LOONGARCH_MAPPED = 0,
KVM_LOONGARCH_GVA,
KVM_LOONGARCH_GPA,
KVM_LOONGARCH_TLB,
KVM_LOONGARCH_TLBINV,
KVM_LOONGARCH_TLBMOD,
};
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end, bool blockable);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
static inline void update_pc(struct kvm_vcpu_arch *arch)
{
arch->pc += 4;
}
/**
* kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
* @vcpu: Virtual CPU.
*
* Returns: Whether the TLBL exception was likely due to an instruction
* fetch fault rather than a data load fault.
*/
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
if (arch->pc == arch->badv)
return true;
return false;
}
/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
extern int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvm_exception_entry(void);
#endif /* __LOONGARCH_KVM_HOST_H__ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H
/*
* Hypcall code field
*/
#define KVM_HC_CODE_SERIVCE 0x0
#define KVM_HC_CODE_SWDBG 0x5
/*
* function id
* 0x00000 ~ 0xfffff Standard Hypervisor Calls
*/
#define KVM_HC_FUNC_FEATURE 0x0
#define KVM_HC_FUNC_NOTIFY 0x1
#define KVM_HC_FUNC_IPI 0x2
/*
* LoongArch support PV feature list
*/
#define KVM_FEATURE_STEAL_TIME 0
#define KVM_FEATURE_MULTI_IPI 1
/*
* LoongArch hypcall return code
*/
#define KVM_RET_SUC 1
#define KVM_RET_NOT_SUPPORTED -1
static inline bool kvm_check_and_clear_guest_paused(void)
{
return false;
}
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
static inline unsigned int kvm_arch_para_hints(void)
{
return 0;
}
#endif /* _ASM_LOONGARCH_KVM_PARA_H */
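The guest side of this interface is not part of this header, but the host-side handlers added later in this patch imply the ABI: the hypercall code is taken from the instruction's 15-bit immediate (reg0i15 format), the function id is passed in a0, arguments in a1..a3, and the return code comes back in a0. A hedged sketch of a guest-side wrapper under those assumptions follows; the hvcl mnemonic and toolchain support for it are assumptions, not something this patch defines.

/* Sketch only: guest-side hypercall wrapper matching the ABI of the
 * host-side _kvm_handle_pv_hcall() handler in this series.
 * Assumes the assembler accepts the "hvcl" mnemonic. */
static inline long kvm_hypercall2(unsigned long fid,
                                  unsigned long arg0, unsigned long arg1)
{
        register unsigned long a0 asm("a0") = fid;      /* function id / return value */
        register unsigned long a1 asm("a1") = arg0;
        register unsigned long a2 asm("a2") = arg1;

        asm volatile("hvcl 0"           /* code 0 == KVM_HC_CODE_SERIVCE */
                     : "+r" (a0)
                     : "r" (a1), "r" (a2)
                     : "memory");
        return a0;
}

Probing a PV feature would then look like kvm_hypercall2(KVM_HC_FUNC_FEATURE, KVM_FEATURE_STEAL_TIME, 0), which is expected to return KVM_RET_SUC or KVM_RET_NOT_SUPPORTED.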
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH64_KVM_TYPES_H
#define _ASM_LOONGARCH64_KVM_TYPES_H
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 4
#endif /* _ASM_LOONGARCH64_KVM_TYPES_H */
@@ -78,16 +78,6 @@ extern void calculate_cpu_foreign_map(void);
*/
extern void show_ipi_list(struct seq_file *p, int prec);
/*
* This function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
static inline void smp_send_reschedule(int cpu)
{
loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
}
static inline void arch_send_call_function_single_ipi(int cpu)
{
loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2020 Loongson Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
* Authors: Xing Li <lixing@loongson.cn>
*/
#ifndef __LINUX_KVM_LOONGARCH_H
#define __LINUX_KVM_LOONGARCH_H
#include <linux/types.h>
#ifndef __KERNEL__
#include <stdint.h>
#endif
#define __KVM_HAVE_GUEST_DEBUG
#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_DATA_HW_BREAKPOINT_NUM 8
#define KVM_INST_HW_BREAKPOINT_NUM 8
/*
* KVM Loongarch specific structures and definitions.
*
* Some parts derived from the x86 version of this file.
*/
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
/*
* for KVM_GET_REGS and KVM_SET_REGS
*/
struct kvm_regs {
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
__u64 gpr[32];
__u64 pc;
};
/*
* for KVM_GET_CPUCFG
*/
struct kvm_cpucfg {
/* out (KVM_GET_CPUCFG) */
__u32 cpucfg[64];
};
/*
* for KVM_GET_FPU and KVM_SET_FPU
*/
struct kvm_fpu {
__u32 fcsr;
__u32 none;
__u64 fcc; /* 8x8 */
struct kvm_fpureg {
__u64 val64[4]; //support max 256 bits
} fpr[32];
};
/*
* For LOONGARCH, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
* bits[63..52] - As per linux/kvm.h
* bits[51..32] - Must be zero.
* bits[31..16] - Register set.
*
* Register set = 0: GP registers from kvm_regs (see definitions below).
*
* Register set = 1: CSR registers.
*
* Register set = 2: KVM specific registers (see definitions below).
*
* Register set = 3: FPU / SIMD registers (see definitions below).
*
* Other register sets may be added in the future. Each set would
* have its own identifier in bits[31..16].
*/
#define KVM_REG_LOONGARCH_GP (KVM_REG_LOONGARCH | 0x00000ULL)
#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL)
#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
#define KVM_REG_LOONGARCH_FPU (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x30000ULL)
#define KVM_CSR_IDX_MASK (0x10000 - 1)
/*
* KVM_REG_LOONGARCH_KVM - KVM specific control registers.
*/
#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
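To make the id encoding described above concrete, here is a hedged userspace sketch of reading one guest CSR and one KVM-specific register through the generic ONE_REG interface. vcpu_fd is assumed to be an already-created vcpu file descriptor, and the KVM_REG_LOONGARCH base itself comes from linux/kvm.h.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Same encoding as LOONGARCH_CSR_64(csr_num, 0) in asm/kvm_host.h:
 * register set 1 (CSR), 64-bit size, index = 8 * csr_num. */
static int get_guest_csr(int vcpu_fd, unsigned int csr_num, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * csr_num),
                .addr = (uint64_t)(unsigned long)val,
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Register set 2 (KVM-specific): the counter register defined just above. */
static int get_guest_counter(int vcpu_fd, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_LOONGARCH_COUNTER,
                .addr = (uint64_t)(unsigned long)val,
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}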
#define __KVM_HAVE_IRQ_LINE
struct kvm_debug_exit_arch {
__u64 era;
__u32 fwps;
__u32 mwps;
__u32 exception;
};
/* for KVM_SET_GUEST_DEBUG */
struct hw_breakpoint {
__u64 addr;
__u64 mask;
__u32 asid;
__u32 ctrl;
};
struct kvm_guest_debug_arch {
struct hw_breakpoint data_breakpoint[KVM_DATA_HW_BREAKPOINT_NUM];
struct hw_breakpoint inst_breakpoint[KVM_INST_HW_BREAKPOINT_NUM];
int inst_bp_nums, data_bp_nums;
};
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};
/* dummy definition */
struct kvm_sregs {
};
struct kvm_iocsr_entry {
__u32 addr;
__u32 pad;
__u64 data;
};
struct kvm_csr_entry {
__u32 index;
__u32 reserved;
__u64 data;
};
/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
__u32 ncsrs; /* number of msrs in entries */
__u32 pad;
struct kvm_csr_entry entries[0];
};
struct kvm_loongarch_interrupt {
/* in */
__u32 cpu;
__u32 irq;
};
#define KVM_IRQCHIP_LS7A_IOAPIC 0x0
#define KVM_IRQCHIP_LS3A_GIPI 0x1
#define KVM_IRQCHIP_LS3A_HT_IRQ 0x2
#define KVM_IRQCHIP_LS3A_ROUTE 0x3
#define KVM_IRQCHIP_LS3A_EXTIRQ 0x4
#define KVM_IRQCHIP_LS3A_IPMASK 0x5
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 64
#define KVM_MAX_CORES 256
#define KVM_EXTIOI_IRQS (256)
#define KVM_EXTIOI_IRQS_BITMAP_SIZE (KVM_EXTIOI_IRQS / 8)
/* map to ipnum per 32 irqs */
#define KVM_EXTIOI_IRQS_IPMAP_SIZE (KVM_EXTIOI_IRQS / 32)
#define KVM_EXTIOI_IRQS_PER_GROUP 32
#define KVM_EXTIOI_IRQS_COREMAP_SIZE (KVM_EXTIOI_IRQS)
#define KVM_EXTIOI_IRQS_NODETYPE_SIZE 16
struct ls7a_ioapic_state {
/* 0x000 interrupt id register */
__u64 int_id;
/* 0x020 interrupt mask register */
__u64 int_mask;
/* 0x040 1=msi */
__u64 htmsi_en;
/* 0x060 edge=1 level =0 */
__u64 intedge;
/* 0x080 for clearing edge interrupts; write 1 to clear, writing 0 has no effect */
__u64 intclr;
/* 0x0c0 */
__u64 auto_crtl0;
/* 0x0e0 */
__u64 auto_crtl1;
/* 0x100 - 0x140 */
__u8 route_entry[64];
/* 0x200 - 0x240 */
__u8 htmsi_vector[64];
/* 0x300 */
__u64 intisr_chip0;
/* 0x320 */
__u64 intisr_chip1;
/* edge detection */
__u64 last_intirr;
/* 0x380 interrupt request register */
__u64 intirr;
/* 0x3a0 interrupt service register */
__u64 intisr;
/* 0x3e0 interrupt level polarity selection register,
* 0 for high level trigger
*/
__u64 int_polarity;
};
struct loongarch_gipi_single {
__u32 status;
__u32 en;
__u32 set;
__u32 clear;
__u64 buf[4];
};
struct loongarch_gipiState {
struct loongarch_gipi_single core[KVM_MAX_CORES];
};
struct kvm_loongarch_ls3a_extirq_state {
union ext_en_r {
uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
} ext_en_r;
union bounce_r {
uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
} bounce_r;
union ext_isr_r {
uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
} ext_isr_r;
union ext_core_isr_r {
uint64_t reg_u64[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE];
} ext_core_isr_r;
union ip_map_r {
uint64_t reg_u64;
uint32_t reg_u32[KVM_EXTIOI_IRQS_IPMAP_SIZE / 4];
uint8_t reg_u8[KVM_EXTIOI_IRQS_IPMAP_SIZE];
} ip_map_r;
union core_map_r {
uint64_t reg_u64[KVM_EXTIOI_IRQS_COREMAP_SIZE / 8];
uint32_t reg_u32[KVM_EXTIOI_IRQS_COREMAP_SIZE / 4];
uint8_t reg_u8[KVM_EXTIOI_IRQS_COREMAP_SIZE];
} core_map_r;
union node_type_r {
uint64_t reg_u64[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 4];
uint32_t reg_u32[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 2];
uint16_t reg_u16[KVM_EXTIOI_IRQS_NODETYPE_SIZE];
uint8_t reg_u8[KVM_EXTIOI_IRQS_NODETYPE_SIZE * 2];
} node_type_r;
};
struct loongarch_kvm_irqchip {
__u16 chip_id;
__u16 len;
__u16 vcpu_id;
__u16 reserved;
char data[0];
};
#endif /* __LINUX_KVM_LOONGARCH_H */
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <linux/kvm_host.h>
#include <asm/cpu-info.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -269,3 +270,35 @@ void output_pbe_defines(void)
}
#endif
void output_kvm_defines(void)
{
COMMENT(" KVM/LOONGISA Specific offsets. ");
OFFSET(VCPU_FCSR0, kvm_vcpu_arch, fpu.fcsr);
OFFSET(VCPU_FCC, kvm_vcpu_arch, fpu.fcc);
BLANK();
OFFSET(KVM_VCPU_ARCH, kvm_vcpu, arch);
OFFSET(KVM_VCPU_KVM, kvm_vcpu, kvm);
OFFSET(KVM_VCPU_RUN, kvm_vcpu, run);
BLANK();
OFFSET(KVM_ARCH_HSTACK, kvm_vcpu_arch, host_stack);
OFFSET(KVM_ARCH_HGP, kvm_vcpu_arch, host_gp);
OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit);
OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd);
OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry);
OFFSET(KVM_ARCH_GPC, kvm_vcpu_arch, pc);
OFFSET(KVM_ARCH_GGPR, kvm_vcpu_arch, gprs);
OFFSET(KVM_ARCH_HESTAT, kvm_vcpu_arch, host_estat);
OFFSET(KVM_ARCH_HBADV, kvm_vcpu_arch, badv);
OFFSET(KVM_ARCH_HBADI, kvm_vcpu_arch, badi);
OFFSET(KVM_ARCH_ISHYPCALL, kvm_vcpu_arch, is_hypcall);
OFFSET(KVM_ARCH_HECFG, kvm_vcpu_arch, host_ecfg);
OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry);
OFFSET(KVM_ARCH_HPERCPU, kvm_vcpu_arch, host_percpu);
OFFSET(KVM_GPGD, kvm, arch.gpa_mm.pgd);
BLANK();
}
@@ -151,6 +151,17 @@ void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ipi_write_action(cpu_logical_map(i), (u32)action);
}
/*
* This function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
void smp_send_reschedule(int cpu)
{
loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
# SPDX-License-Identifier: GPL-2.0
#
# KVM configuration
#
source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
help
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and disabled.
if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
select EXPORT_UASM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select MMU_NOTIFIER
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_EVENTFD
select HAVE_KVM_MSI
select SRCU
select KVM_VFIO
help
Support for hosting Guest kernels.
choice
prompt "Virtualization mode"
depends on KVM
default KVM_LOONGARCH_LVZ
config KVM_LOONGARCH_LVZ
bool "LOONGARCH Virtualization (VZ) ASE"
help
Use the LOONGARCH Virtualization (VZ) ASE to virtualize guests. This
supports running unmodified guest kernels, but requires hardware
support.
endchoice
source "drivers/vhost/Kconfig"
endif # VIRTUALIZATION
# SPDX-License-Identifier: GPL-2.0
# Makefile for KVM support for LoongArch
#
OBJECT_FILES_NON_STANDARD_entry.o := y
common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
irqchip.o eventfd.o)
KVM := ../../../virt/kvm
common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/loongarch/kvm
kvm-objs := $(common-objs-y) loongarch.o emulate.o interrupt.o
kvm-objs += hypcall.o
kvm-objs += mmu.o
kvm-objs += kvm_compat.o
kvm-objs += exit.o intc/ls7a_irq.o intc/ls3a_ipi.o intc/irqchip-debug.o\
timer.o intc/ls3a_ext_irq.o irqfd.o csr.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += entry.o fpu.o
This diff has been collapsed.
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>
#include "kvmcpu.h"
#include "trace.h"
int _kvm_emu_idle(struct kvm_vcpu *vcpu)
{
++vcpu->stat.idle_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_IDLE);
if (!vcpu->arch.irq_pending) {
kvm_save_timer(vcpu);
kvm_vcpu_block(vcpu);
/*
* If we are runnable, then definitely go off to user space to
* check if any I/O interrupts are pending.
*/
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
}
}
return EMULATE_DONE;
}
int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
struct kvm_run *run = vcpu->run;
unsigned int rd, op8, opcode;
unsigned long rd_val = 0;
void *data = run->mmio.data;
unsigned long curr_pc;
int ret = 0;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
curr_pc = vcpu->arch.pc;
update_pc(&vcpu->arch);
op8 = (inst.word >> 24) & 0xff;
run->mmio.phys_addr = vcpu->arch.badv;
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
goto out_fail;
if (op8 < 0x28) {
/* stptrw/d process */
rd = inst.reg2i14_format.rd;
opcode = inst.reg2i14_format.opcode;
switch (opcode) {
case stptrd_op:
run->mmio.len = 8;
*(unsigned long *)data = vcpu->arch.gprs[rd];
break;
case stptrw_op:
run->mmio.len = 4;
*(unsigned int *)data = vcpu->arch.gprs[rd];
break;
default:
break;
}
} else if (op8 < 0x30) {
/* st.b/h/w/d process */
rd = inst.reg2i12_format.rd;
opcode = inst.reg2i12_format.opcode;
rd_val = vcpu->arch.gprs[rd];
switch (opcode) {
case std_op:
run->mmio.len = 8;
*(unsigned long *)data = rd_val;
break;
case stw_op:
run->mmio.len = 4;
*(unsigned int *)data = rd_val;
break;
case sth_op:
run->mmio.len = 2;
*(unsigned short *)data = rd_val;
break;
case stb_op:
run->mmio.len = 1;
*(unsigned char *)data = rd_val;
break;
default:
kvm_err("Store not yet supporded (inst=0x%08x)\n",
inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
goto out_fail;
}
} else if (op8 == 0x38) {
/* stxb/h/w/d process */
rd = inst.reg3_format.rd;
opcode = inst.reg3_format.opcode;
switch (opcode) {
case stxb_op:
run->mmio.len = 1;
*(unsigned char *)data = vcpu->arch.gprs[rd];
break;
case stxh_op:
run->mmio.len = 2;
*(unsigned short *)data = vcpu->arch.gprs[rd];
break;
case stxw_op:
run->mmio.len = 4;
*(unsigned int *)data = vcpu->arch.gprs[rd];
break;
case stxd_op:
run->mmio.len = 8;
*(unsigned long *)data = vcpu->arch.gprs[rd];
break;
default:
kvm_err("Store not yet supporded (inst=0x%08x)\n",
inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
goto out_fail;
}
} else {
kvm_err("Store not yet supporded (inst=0x%08x)\n",
inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
goto out_fail;
}
/* All in-kernel MMIO emulation goes through the common interface */
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
run->mmio.len, data);
if (!ret) {
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
return EMULATE_DO_MMIO;
out_fail:
/* Rollback PC if emulation was unsuccessful */
vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
}
int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
unsigned int op8, opcode, rd;
int ret = 0;
struct kvm_run *run = vcpu->run;
run->mmio.phys_addr = vcpu->arch.badv;
if (run->mmio.phys_addr == KVM_INVALID_ADDR)
return EMULATE_FAIL;
vcpu->mmio_needed = 2; /* signed */
op8 = (inst.word >> 24) & 0xff;
if (op8 < 0x28) {
/* ldptr.w/d process */
rd = inst.reg2i14_format.rd;
opcode = inst.reg2i14_format.opcode;
switch (opcode) {
case ldptrd_op:
run->mmio.len = 8;
break;
case ldptrw_op:
run->mmio.len = 4;
break;
default:
break;
}
} else if (op8 < 0x2f) {
/* ld.b/h/w/d, ld.bu/hu/wu process */
rd = inst.reg2i12_format.rd;
opcode = inst.reg2i12_format.opcode;
switch (opcode) {
case ldd_op:
run->mmio.len = 8;
break;
case ldwu_op:
vcpu->mmio_needed = 1; /* unsigned */
run->mmio.len = 4;
break;
case ldw_op:
run->mmio.len = 4;
break;
case ldhu_op:
vcpu->mmio_needed = 1; /* unsigned */
run->mmio.len = 2;
break;
case ldh_op:
run->mmio.len = 2;
break;
case ldbu_op:
vcpu->mmio_needed = 1; /* unsigned */
run->mmio.len = 1;
break;
case ldb_op:
run->mmio.len = 1;
break;
default:
kvm_err("Load not yet supporded (inst=0x%08x)\n",
inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_FAIL;
}
} else if (op8 == 0x38) {
/* ldxb/h/w/d, ldxb/h/wu, ldgtb/h/w/d, ldleb/h/w/d process */
rd = inst.reg3_format.rd;
opcode = inst.reg3_format.opcode;
switch (opcode) {
case ldxb_op:
run->mmio.len = 1;
break;
case ldxbu_op:
run->mmio.len = 1;
vcpu->mmio_needed = 1; /* unsigned */
break;
case ldxh_op:
run->mmio.len = 2;
break;
case ldxhu_op:
run->mmio.len = 2;
vcpu->mmio_needed = 1; /* unsigned */
break;
case ldxw_op:
run->mmio.len = 4;
break;
case ldxwu_op:
run->mmio.len = 4;
vcpu->mmio_needed = 1; /* unsigned */
break;
case ldxd_op:
run->mmio.len = 8;
break;
default:
kvm_err("Load not yet supporded (inst=0x%08x)\n",
inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_FAIL;
}
} else {
kvm_err("Load not yet supporded (inst=0x%08x) @ %lx\n",
inst.word, vcpu->arch.pc);
vcpu->mmio_needed = 0;
return EMULATE_FAIL;
}
/* Set for _kvm_complete_mmio_read use */
vcpu->arch.io_gpr = rd;
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
run->mmio.len, run->mmio.data);
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
if (!ret) {
_kvm_complete_mmio_read(vcpu, run);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
return EMULATE_DO_MMIO;
}
int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
/* update with new PC */
update_pc(&vcpu->arch);
switch (run->mmio.len) {
case 8:
*gpr = *(s64 *)run->mmio.data;
break;
case 4:
if (vcpu->mmio_needed == 2) {
*gpr = *(int *)run->mmio.data;
} else
*gpr = *(unsigned int *)run->mmio.data;
break;
case 2:
if (vcpu->mmio_needed == 2)
*gpr = *(short *) run->mmio.data;
else
*gpr = *(unsigned short *)run->mmio.data;
break;
case 1:
if (vcpu->mmio_needed == 2)
*gpr = *(char *) run->mmio.data;
else
*gpr = *(unsigned char *) run->mmio.data;
break;
default:
kvm_err("Bad MMIO length: %d,addr is 0x%lx",
run->mmio.len, vcpu->arch.badv);
er = EMULATE_FAIL;
break;
}
return er;
}
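For context on the round trip above: when these helpers return EMULATE_DO_MMIO the exit reaches userspace as KVM_EXIT_MMIO, the VMM services the access through the shared kvm_run area, and for loads the data it writes back is expected to be folded into the target GPR by _kvm_complete_mmio_read() on the next KVM_RUN. A hedged sketch of the userspace half of that protocol, using only the standard KVM run-loop API; the device-model helpers are hypothetical stand-ins:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model hooks, stand-ins for a real VMM backend. */
extern void handle_mmio_write(uint64_t addr, const void *data, unsigned int len);
extern uint64_t handle_mmio_read(uint64_t addr, unsigned int len);

void vcpu_loop(int vcpu_fd, struct kvm_run *run)        /* run = mmap'ed kvm_run */
{
        for (;;) {
                ioctl(vcpu_fd, KVM_RUN, 0);

                switch (run->exit_reason) {
                case KVM_EXIT_MMIO:
                        if (run->mmio.is_write) {
                                /* data[] was filled in by _kvm_emu_mmio_write() */
                                handle_mmio_write(run->mmio.phys_addr,
                                                  run->mmio.data, run->mmio.len);
                        } else {
                                uint64_t val = handle_mmio_read(run->mmio.phys_addr,
                                                                run->mmio.len);
                                /* folded into the guest GPR on the next KVM_RUN */
                                memcpy(run->mmio.data, &val, run->mmio.len);
                        }
                        break;
                default:
                        return;         /* other exit reasons elided */
                }
        }
}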
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/stackframe.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include "kvm_compat.h"
#define RESUME_HOST (1 << 1)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
#define PT_GPR_OFFSET(x) (PT_R0 + 8*x)
.text
.macro kvm_save_guest_gprs base
.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
KVM_LONG_S $r\n, \base, GGPR_OFFSET(\n)
.endr
.endm
.macro kvm_restore_guest_gprs base
.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
KVM_LONG_L $r\n, \base, GGPR_OFFSET(\n)
.endr
.endm
.macro kvm_save_host_gpr base
.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
KVM_LONG_S $r\n, \base, PT_GPR_OFFSET(\n)
.endr
.endm
.macro kvm_restore_host_gpr base
.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
KVM_LONG_L $r\n, \base, PT_GPR_OFFSET(\n)
.endr
.endm
/*
* prepare switch to guest
* @param:
* KVM_ARCH: kvm_vcpu_arch, don't touch it until 'ertn'
* GPRNUM: KVM_ARCH gpr number
* tmp, tmp1: temp register
*/
.macro kvm_switch_to_guest KVM_ARCH GPRNUM tmp tmp1
/* set host excfg.VS=0, all exceptions share one exception entry */
csrrd \tmp, KVM_CSR_ECFG
bstrins.w \tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), KVM_ECFG_VS_SHIFT
csrwr \tmp, KVM_CSR_ECFG
/* Load up the new EENTRY */
KVM_LONG_L \tmp, \KVM_ARCH, KVM_ARCH_GEENTRY
csrwr \tmp, KVM_CSR_EENTRY
/* Set Guest ERA */
KVM_LONG_L \tmp, \KVM_ARCH, KVM_ARCH_GPC
csrwr \tmp, KVM_CSR_ERA
/* Save host PGDL */
csrrd \tmp, KVM_CSR_PGDL
KVM_LONG_S \tmp, \KVM_ARCH, KVM_ARCH_HPGD
/* Switch to kvm */
KVM_LONG_L \tmp1, \KVM_ARCH, KVM_VCPU_KVM - KVM_VCPU_ARCH
/* Load guest PGDL */
lu12i.w \tmp, KVM_GPGD
srli.w \tmp, \tmp, 12
ldx.d \tmp, \tmp1, \tmp
csrwr \tmp, KVM_CSR_PGDL
/* Mix GID and RID */
csrrd \tmp1, KVM_CSR_GSTAT
bstrpick.w \tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), KVM_GSTAT_GID_SHIFT
csrrd \tmp, KVM_CSR_GTLBC
bstrins.w \tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT
csrwr \tmp, KVM_CSR_GTLBC
/*
* Switch to guest:
* GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0
* ertn
*/
/* Prepare to enable interrupts before entering the guest */
ori \tmp, zero, KVM_PRMD_PIE
csrxchg \tmp, \tmp, KVM_CSR_PRMD
/* Set PVM bit to setup ertn to guest context */
ori \tmp, zero, KVM_GSTAT_PVM
csrxchg \tmp, \tmp, KVM_CSR_GSTAT
/* Load Guest gprs */
kvm_restore_guest_gprs \KVM_ARCH
/* Load KVM_ARCH register */
KVM_LONG_L \KVM_ARCH, \KVM_ARCH, GGPR_OFFSET(\GPRNUM)
ertn
.endm
#ifndef EXCPTION_ENTRY
#define EXCPTION_ENTRY(name) \
.globl name ASM_NL \
.p2align 12; \
name: \
.cfi_startproc;
#endif
#ifndef EXCPTION_ENDPROC
#define EXCPTION_ENDPROC(name) \
.cfi_endproc; \
SYM_END(name, SYM_T_FUNC)
#endif
/* load kvm_vcpu to a2 and store a1 for free use */
EXCPTION_ENTRY(kvm_exception_entry)
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
KVM_LONG_ADDI a2, a2, KVM_VCPU_ARCH
/* After save gprs, free to use any gpr */
kvm_save_guest_gprs a2
/* Save guest a2 */
csrrd t0, KVM_TEMP_KS
KVM_LONG_S t0, a2, GGPR_OFFSET(REG_A2)
b kvm_exit_entry
EXCPTION_ENDPROC(kvm_exception_entry)
/* a2: kvm_vcpu_arch, a1 is free to use */
SYM_FUNC_START(kvm_exit_entry)
csrrd s1, KVM_VCPU_KS
KVM_LONG_L s0, s1, KVM_VCPU_RUN
csrrd t0, KVM_CSR_ESTAT
KVM_LONG_S t0, a2, KVM_ARCH_HESTAT
csrrd t0, KVM_CSR_ERA
KVM_LONG_S t0, a2, KVM_ARCH_GPC
csrrd t0, KVM_CSR_BADV
KVM_LONG_S t0, a2, KVM_ARCH_HBADV
csrrd t0, KVM_CSR_BADI
KVM_LONG_S t0, a2, KVM_ARCH_HBADI
/* Restore host excfg.VS */
csrrd t0, KVM_CSR_ECFG
KVM_LONG_L t1, a2, KVM_ARCH_HECFG
or t0, t0, t1
csrwr t0, KVM_CSR_ECFG
/* Restore host eentry */
KVM_LONG_L t0, a2, KVM_ARCH_HEENTRY
csrwr t0, KVM_CSR_EENTRY
#if defined(CONFIG_CPU_HAS_FPU)
/* Save FPU context */
csrrd t0, KVM_CSR_EUEN
ori t1, zero, KVM_EUEN_FPEN | KVM_EUEN_LSXEN | KVM_EUEN_LASXEN
and t2, t0, t1
beqz t2, 1f
movfcsr2gr t3, fcsr0
INT_S t3, a2, VCPU_FCSR0
movcf2gr t3, $fcc0
or t2, t3, zero
movcf2gr t3, $fcc1
bstrins.d t2, t3, 0xf, 0x8
movcf2gr t3, $fcc2
bstrins.d t2, t3, 0x17, 0x10
movcf2gr t3, $fcc3
bstrins.d t2, t3, 0x1f, 0x18
movcf2gr t3, $fcc4
bstrins.d t2, t3, 0x27, 0x20
movcf2gr t3, $fcc5
bstrins.d t2, t3, 0x2f, 0x28
movcf2gr t3, $fcc6
bstrins.d t2, t3, 0x37, 0x30
movcf2gr t3, $fcc7
bstrins.d t2, t3, 0x3f, 0x38
KVM_LONG_S t2, a2, VCPU_FCC
movgr2fcsr fcsr0, zero
1:
#endif
KVM_LONG_L t0, a2, KVM_ARCH_HPGD
csrwr t0, KVM_CSR_PGDL
/* Clear the PVM bit to keep from returning into the guest */
ori t0, zero, KVM_GSTAT_PVM
csrxchg zero, t0, KVM_CSR_GSTAT
/* Clear GTLBC.TGID field */
csrrd t0, KVM_CSR_GTLBC
bstrins.w t0, zero, KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1, KVM_GTLBC_TGID_SHIFT
csrwr t0, KVM_CSR_GTLBC
/* Enable Address Map mode */
ori t0, zero, (1 << KVM_CRMD_DACM_SHIFT)|(1 << KVM_CRMD_DACF_SHIFT) | KVM_CRMD_PG |PLV_KERN
csrwr t0, KVM_CSR_CRMD
KVM_LONG_L tp, a2, KVM_ARCH_HGP
KVM_LONG_L sp, a2, KVM_ARCH_HSTACK
/* restore per cpu register */
KVM_LONG_L $r21, a2, KVM_ARCH_HPERCPU
KVM_LONG_ADDI sp, sp, -PT_SIZE
/* Prepare handle exception */
or a0, s0, zero
or a1, s1, zero
KVM_LONG_L t8, a2, KVM_ARCH_HANDLE_EXIT
jirl ra,t8, 0
ori t0, zero, KVM_CRMD_IE
csrxchg zero, t0, KVM_CSR_CRMD
or a2, s1, zero
KVM_LONG_ADDI a2, a2, KVM_VCPU_ARCH
andi t0, a0, RESUME_HOST
bnez t0, ret_to_host
INT_S zero, a2, KVM_ARCH_ISHYPCALL
ret_to_guest:
/* Save per cpu register again, maybe switched to another cpu */
KVM_LONG_S $r21, a2, KVM_ARCH_HPERCPU
/* Save kvm_vcpu to kscratch */
csrwr s1, KVM_VCPU_KS
kvm_switch_to_guest a2 REG_A2 t0 t1
ret_to_host:
KVM_LONG_L a2, a2, KVM_ARCH_HSTACK
addi.d a2, a2, -PT_SIZE
srai.w a3, a0, 2
or a0, a3, zero
kvm_restore_host_gpr a2
jirl zero, ra, 0
SYM_FUNC_END(kvm_exit_entry)
/*
* int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
*
* @register_param:
* a0: kvm_run* run
* a1: kvm_vcpu* vcpu
*/
SYM_FUNC_START(kvm_enter_guest)
/* allocate space in stack bottom */
KVM_LONG_ADDI a2, sp, -PT_SIZE
/* save host gprs */
kvm_save_host_gpr a2
/* save host crmd,prmd csr to stack */
csrrd a3, KVM_CSR_CRMD
KVM_LONG_S a3, a2, PT_CRMD
csrrd a3, KVM_CSR_PRMD
KVM_LONG_S a3, a2, PT_PRMD
KVM_LONG_ADDI a2, a1, KVM_VCPU_ARCH
KVM_LONG_S sp, a2, KVM_ARCH_HSTACK
KVM_LONG_S tp, a2, KVM_ARCH_HGP
/* Save per cpu register */
KVM_LONG_S $r21, a2, KVM_ARCH_HPERCPU
/* Save kvm_vcpu to kscratch */
csrwr a1, KVM_VCPU_KS
kvm_switch_to_guest a2 REG_A2 t0 t1
SYM_FUNC_END(kvm_enter_guest)
SYM_FUNC_START(__kvm_save_fpu)
fpu_save_double a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_fpu)
SYM_FUNC_START(__kvm_restore_fpu)
fpu_restore_double a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_fpu)
SYM_FUNC_START(__kvm_restore_fcsr)
fpu_restore_csr a0 t1
fpu_restore_cc a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_fcsr)
#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(__kvm_save_lsx)
lsx_save_data a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_lsx)
SYM_FUNC_START(__kvm_restore_lsx)
lsx_restore_data a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx)
SYM_FUNC_START(__kvm_restore_lsx_upper)
lsx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx_upper)
#endif
#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(__kvm_save_lasx)
lasx_save_data a0 t7
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_lasx)
SYM_FUNC_START(__kvm_restore_lasx)
lasx_restore_data a0 t7
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lasx)
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include "kvmcpu.h"
#include <linux/kvm_host.h>
#include "trace.h"
#include "kvm_compat.h"
#include "kvmcsr.h"
#include "intc/ls3a_ext_irq.h"
/*
* Loongarch KVM callback handling for not implemented guest exiting
*/
static int _kvm_fault_ni(struct kvm_vcpu *vcpu)
{
unsigned long estat, badv;
unsigned int exccode, inst;
/*
* Fetch the instruction.
*/
badv = vcpu->arch.badv;
estat = vcpu->arch.host_estat;
exccode = (estat & KVM_ESTAT_EXC) >> KVM_ESTAT_EXC_SHIFT;
inst = vcpu->arch.badi;
kvm_err("Exccode: %d PC=%#lx inst=0x%08x BadVaddr=%#lx estat=%#llx\n",
exccode, vcpu->arch.pc, inst, badv, kvm_read_gcsr_estat());
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
enum emulation_result er = EMULATE_DONE;
unsigned int rd, rj, csrid;
unsigned long csr_mask;
unsigned long val = 0;
/*
* CSR value mask imm
* rj = 0 means csrrd
* rj = 1 means csrwr
* rj != 0,1 means csrxchg
*/
rd = inst.reg2csr_format.rd;
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
/* Process CSR ops */
if (rj == 0) {
/* process csrrd */
val = _kvm_emu_read_csr(vcpu, csrid);
if (er != EMULATE_FAIL)
vcpu->arch.gprs[rd] = val;
} else if (rj == 1) {
/* process csrwr */
val = vcpu->arch.gprs[rd];
_kvm_emu_write_csr(vcpu, csrid, val);
} else {
/* process csrxchg */
val = vcpu->arch.gprs[rd];
csr_mask = vcpu->arch.gprs[rj];
_kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
}
return er;
}
static int _kvm_emu_cache(struct kvm_vcpu *vcpu, larch_inst inst)
{
return EMULATE_DONE;
}
static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_run *run = vcpu->run;
larch_inst inst;
unsigned long curr_pc;
int rd, rj;
unsigned int index;
/*
* Fetch the instruction.
*/
inst.word = vcpu->arch.badi;
curr_pc = vcpu->arch.pc;
update_pc(&vcpu->arch);
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0:
/* cpucfg GSPR */
if (inst.reg2_format.opcode == 0x1B) {
rd = inst.reg2_format.rd;
rj = inst.reg2_format.rj;
++vcpu->stat.cpucfg_exits;
index = vcpu->arch.gprs[rj];
vcpu->arch.gprs[rd] = vcpu->kvm->arch.cpucfgs.cpucfg[index];
if (vcpu->arch.gprs[rd] == 0) {
/*
* Fall back to the host cpucfg info; this is just for
* compatibility with older qemu.
*/
vcpu->arch.gprs[rd] = read_cpucfg(index);
/* Nested KVM is not supported */
if (index == 2)
vcpu->arch.gprs[rd] &= ~CPUCFG2_LVZP;
}
er = EMULATE_DONE;
}
break;
case 0x4:
/* csr GSPR */
er = _kvm_handle_csr(vcpu, inst);
break;
case 0x6:
/* iocsr,cache,idle GSPR */
switch (((inst.word >> 22) & 0x3ff)) {
case 0x18:
/* cache GSPR */
er = _kvm_emu_cache(vcpu, inst);
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
break;
case 0x19:
/* iocsr/idle GSPR */
switch (((inst.word >> 15) & 0x1ffff)) {
case 0xc90:
/* iocsr GSPR */
er = _kvm_emu_iocsr(inst, run, vcpu);
break;
case 0xc91:
/* idle GSPR */
er = _kvm_emu_idle(vcpu);
break;
default:
er = EMULATE_FAIL;
break;
}
break;
default:
er = EMULATE_FAIL;
break;
}
break;
default:
er = EMULATE_FAIL;
break;
}
/* Rollback PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
curr_pc, __func__, inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
vcpu->arch.pc = curr_pc;
}
return er;
}
static int _kvm_check_hypcall(struct kvm_vcpu *vcpu)
{
enum emulation_result ret;
larch_inst inst;
unsigned long curr_pc;
unsigned int code;
/*
* Update PC and hold onto current PC in case there is
* an error and we want to rollback the PC
*/
inst.word = vcpu->arch.badi;
code = inst.reg0i15_format.simmediate;
curr_pc = vcpu->arch.pc;
update_pc(&vcpu->arch);
ret = EMULATE_DONE;
switch (code) {
case KVM_HC_CODE_SERIVCE:
ret = EMULATE_PV_HYPERCALL;
break;
case KVM_HC_CODE_SWDBG:
/*
* Only SWDBG (SoftWare DeBug) can stop the vm;
* codes other than 0 are ignored.
*/
ret = EMULATE_DEBUG;
break;
default:
kvm_info("[%#lx] HYPCALL %#03x unsupported\n", vcpu->arch.pc, code);
break;
}
if (ret == EMULATE_DEBUG)
vcpu->arch.pc = curr_pc;
return ret;
}
/* Executing the cpucfg instruction will trigger a GSPR exception.
* Accesses to the unimplemented CSRs 0x15, 0x16, 0x50~0x53, 0x80, 0x81,
* 0x90~0x95, 0x98, 0xc0~0xff, 0x100~0x109, 0x500~0x502, as well as
* cache_op, idle_op and iocsr ops, trigger it the same way. */
static int _kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
vcpu->arch.is_hypcall = 0;
er = _kvm_trap_handle_gspr(vcpu);
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
vcpu->run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else if (er == EMULATE_DO_IOCSR) {
vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
ret = RESUME_HOST;
} else {
kvm_err("%s internal error\n", __func__);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int _kvm_handle_hypcall(struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
vcpu->arch.is_hypcall = 0;
er = _kvm_check_hypcall(vcpu);
if (er == EMULATE_PV_HYPERCALL)
ret = _kvm_handle_pv_hcall(vcpu);
else if (er == EMULATE_DEBUG) {
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
ret = RESUME_HOST;
} else
ret = RESUME_GUEST;
return ret;
}
static int _kvm_handle_gcm(struct kvm_vcpu *vcpu)
{
int ret, subcode;
vcpu->arch.is_hypcall = 0;
ret = RESUME_GUEST;
subcode = (vcpu->arch.host_estat & KVM_ESTAT_ESUBCODE) >> KVM_ESTAT_ESUBCODE_SHIFT;
if ((subcode != EXCSUBCODE_GCSC) && (subcode != EXCSUBCODE_GCHC)) {
kvm_err("%s internal error\n", __func__);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
/**
* _kvm_handle_fpu_disabled() - Guest used the fpu while it is disabled in the host
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use fpu which hasn't been allowed
* by the root context.
*/
static int _kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/*
* If guest FPU not present, the FPU operation should have been
* treated as a reserved instruction!
* If FPU already in use, we shouldn't get this at all.
*/
if (WARN_ON(!_kvm_guest_has_fpu(&vcpu->arch) ||
vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
kvm_err("%s internal error\n", __func__);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
kvm_own_fpu(vcpu);
return RESUME_GUEST;
}
/**
* _kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use LSX when it is disabled in the root
* context.
*/
static int _kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/*
* If LSX not present or not exposed to guest, the LSX operation
* should have been treated as a reserved instruction!
* If LSX already in use, we shouldn't get this at all.
*/
if (!_kvm_guest_has_lsx(&vcpu->arch) ||
!(kvm_read_gcsr_euen() & KVM_EUEN_LSXEN) ||
vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
kvm_err("%s internal error, lsx %d guest euen %llx aux %x",
__func__, _kvm_guest_has_lsx(&vcpu->arch),
kvm_read_gcsr_euen(), vcpu->arch.aux_inuse);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
#ifdef CONFIG_CPU_HAS_LSX
kvm_own_lsx(vcpu);
#endif
return RESUME_GUEST;
}
bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu)
{
return cpu_has_lasx && vcpu->arch.lsx_enabled && vcpu->kvm->arch.cpucfg_lasx;
}
/**
* _kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use LASX when it is disabled in the root
* context.
*/
static int _kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/*
* If LASX not present or not exposed to guest, the LASX operation
* should have been treated as a reserved instruction!
* If LASX already in use, we shouldn't get this at all.
*/
if (!_kvm_guest_has_lasx(vcpu) ||
!(kvm_read_gcsr_euen() & KVM_EUEN_LSXEN) ||
!(kvm_read_gcsr_euen() & KVM_EUEN_LASXEN) ||
vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
kvm_err("%s internal error, lasx %d guest euen %llx aux %x",
__func__, _kvm_guest_has_lasx(vcpu),
kvm_read_gcsr_euen(), vcpu->arch.aux_inuse);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
#ifdef CONFIG_CPU_HAS_LASX
kvm_own_lasx(vcpu);
#endif
return RESUME_GUEST;
}
static int _kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
ulong badv = vcpu->arch.badv;
larch_inst inst;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (kvm_handle_mm_fault(vcpu, badv, false)) {
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
kvm_err("%s ifetch error addr:%lx\n", __func__, badv);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
er = _kvm_emu_mmio_read(vcpu, inst);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load failed: PC: %#lx, BadVaddr: %#lx\n",
vcpu->arch.pc, badv);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int _kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
ulong badv = vcpu->arch.badv;
larch_inst inst;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (kvm_handle_mm_fault(vcpu, badv, true)) {
/* Treat as MMIO */
inst.word = vcpu->arch.badi;
er = _kvm_emu_mmio_write(vcpu, inst);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store failed: PC: %#lx, BadVaddr: %#lx\n",
vcpu->arch.pc, badv);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int _kvm_handle_debug(struct kvm_vcpu *vcpu)
{
uint32_t fwps, mwps;
fwps = kvm_csr_readq(KVM_CSR_FWPS);
mwps = kvm_csr_readq(KVM_CSR_MWPS);
if (fwps & 0xff)
kvm_csr_writeq(fwps, KVM_CSR_FWPS);
if (mwps & 0xff)
kvm_csr_writeq(mwps, KVM_CSR_MWPS);
vcpu->run->debug.arch.exception = KVM_EXCCODE_WATCH;
vcpu->run->debug.arch.fwps = fwps;
vcpu->run->debug.arch.mwps = mwps;
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
return RESUME_HOST;
}
static exit_handle_fn _kvm_fault_tables[KVM_INT_START] = {
[KVM_EXCCODE_TLBL] = _kvm_handle_read_fault,
[KVM_EXCCODE_TLBS] = _kvm_handle_write_fault,
[KVM_EXCCODE_TLBI] = _kvm_handle_read_fault,
[KVM_EXCCODE_TLBM] = _kvm_handle_write_fault,
[KVM_EXCCODE_TLBRI] = _kvm_handle_read_fault,
[KVM_EXCCODE_TLBXI] = _kvm_handle_read_fault,
[KVM_EXCCODE_FPDIS] = _kvm_handle_fpu_disabled,
[KVM_EXCCODE_LSXDIS] = _kvm_handle_lsx_disabled,
[KVM_EXCCODE_LASXDIS] = _kvm_handle_lasx_disabled,
[KVM_EXCCODE_WATCH] = _kvm_handle_debug,
[KVM_EXCCODE_GSPR] = _kvm_handle_gspr,
[KVM_EXCCODE_HYP] = _kvm_handle_hypcall,
[KVM_EXCCODE_GCM] = _kvm_handle_gcm,
};
int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
return _kvm_fault_tables[fault](vcpu);
}
void _kvm_init_fault(void)
{
int i;
for (i = 0; i < KVM_INT_START; i++)
if (!_kvm_fault_tables[i])
_kvm_fault_tables[i] = _kvm_fault_ni;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>
#include <asm/fpu.h>
/* FPU/LSX context management */
void __kvm_save_fpu(struct loongarch_fpu *fpu);
void __kvm_restore_fpu(struct loongarch_fpu *fpu);
void __kvm_restore_fcsr(struct loongarch_fpu *fpu);
void kvm_save_fpu(struct kvm_vcpu *cpu)
{
return __kvm_save_fpu(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_save_fpu);
void kvm_restore_fpu(struct kvm_vcpu *cpu)
{
return __kvm_restore_fpu(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_fpu);
void kvm_restore_fcsr(struct kvm_vcpu *cpu)
{
return __kvm_restore_fcsr(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_fcsr);
#ifdef CONFIG_CPU_HAS_LSX
void __kvm_save_lsx(struct loongarch_fpu *fpu);
void __kvm_restore_lsx(struct loongarch_fpu *fpu);
void __kvm_restore_lsx_upper(struct loongarch_fpu *fpu);
void kvm_save_lsx(struct kvm_vcpu *cpu)
{
return __kvm_save_lsx(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_save_lsx);
void kvm_restore_lsx(struct kvm_vcpu *cpu)
{
return __kvm_restore_lsx(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_lsx);
void kvm_restore_lsx_upper(struct kvm_vcpu *cpu)
{
return __kvm_restore_lsx_upper(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_lsx_upper);
#endif
#ifdef CONFIG_CPU_HAS_LASX
void __kvm_save_lasx(struct loongarch_fpu *fpu);
void __kvm_restore_lasx(struct loongarch_fpu *fpu);
void __kvm_restore_lasx_upper(struct loongarch_fpu *fpu);
void kvm_save_lasx(struct kvm_vcpu *cpu)
{
return __kvm_save_lasx(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_save_lasx);
void kvm_restore_lasx(struct kvm_vcpu *cpu)
{
return __kvm_restore_lasx(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_lasx);
void kvm_restore_lasx_upper(struct kvm_vcpu *cpu)
{
return __kvm_restore_lasx_upper(&cpu->arch.fpu);
}
EXPORT_SYMBOL_GPL(kvm_restore_lasx_upper);
#endif
EXPORT_SYMBOL_GPL(kvm_enter_guest);
EXPORT_SYMBOL_GPL(kvm_exception_entry);
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>
#include <asm/kvm_para.h>
#include "intc/ls3a_ipi.h"
#include "kvm_compat.h"
int kvm_virt_ipi(struct kvm_vcpu *vcpu)
{
int ret = 0;
u64 ipi_bitmap;
unsigned int min, action, cpu;
ipi_bitmap = vcpu->arch.gprs[KVM_REG_A1];
min = vcpu->arch.gprs[KVM_REG_A2];
action = vcpu->arch.gprs[KVM_REG_A3];
if (ipi_bitmap) {
cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
while (cpu < BITS_PER_LONG) {
kvm_helper_send_ipi(vcpu, cpu + min, action);
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
}
}
return ret;
}
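/*
 * KVM_HC_FUNC_NOTIFY handler: for KVM_FEATURE_STEAL_TIME the guest passes
 * the guest physical address of its steal time area in A2; the address is
 * recorded and KVM_REQ_RECORD_STEAL is queued so the area gets updated.
 */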
int kvm_save_notify(struct kvm_vcpu *vcpu)
{
unsigned long num, id, data;
int ret = 0;
num = vcpu->arch.gprs[KVM_REG_A0];
id = vcpu->arch.gprs[KVM_REG_A1];
data = vcpu->arch.gprs[KVM_REG_A2];
switch (id) {
case KVM_FEATURE_STEAL_TIME:
if (!sched_info_on())
break;
vcpu->arch.st.guest_addr = data;
kvm_debug("cpu :%d addr:%lx\n", vcpu->vcpu_id, data);
vcpu->arch.st.last_steal = current->sched_info.run_delay;
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
break;
default:
break;
}
return ret;
}
static int _kvm_pv_feature(struct kvm_vcpu *vcpu)
{
int feature = vcpu->arch.gprs[KVM_REG_A1];
int ret = KVM_RET_NOT_SUPPORTED;
switch (feature) {
case KVM_FEATURE_STEAL_TIME:
if (sched_info_on())
ret = KVM_RET_SUC;
break;
case KVM_FEATURE_MULTI_IPI:
ret = KVM_RET_SUC;
break;
default:
break;
}
return ret;
}
/*
 * Hypercall emulation always returns to the guest; the result is placed
 * in A0 for the guest to check.
 */
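/*
 * Guest-side convention implied by the register usage below: the function
 * number is passed in A0, arguments in A1-A3, and the result comes back
 * in A0 (KVM_RET_SUC / KVM_RET_NOT_SUPPORTED). For example, a guest
 * probing for steal time support issues a hypercall with
 * A0 = KVM_HC_FUNC_FEATURE and A1 = KVM_FEATURE_STEAL_TIME, then tests A0
 * on return.
 */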
int _kvm_handle_pv_hcall(struct kvm_vcpu *vcpu)
{
unsigned long func = vcpu->arch.gprs[KVM_REG_A0];
int hyp_ret = KVM_RET_NOT_SUPPORTED;
switch (func) {
case KVM_HC_FUNC_FEATURE:
hyp_ret = _kvm_pv_feature(vcpu);
break;
case KVM_HC_FUNC_NOTIFY:
hyp_ret = kvm_save_notify(vcpu);
break;
case KVM_HC_FUNC_IPI:
hyp_ret = kvm_virt_ipi(vcpu);
break;
default:
kvm_info("[%#lx] hvc func:%#lx unsupported\n", vcpu->arch.pc, func);
break;
}
vcpu->arch.gprs[KVM_REG_A0] = hyp_ret;
return RESUME_GUEST;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/seq_file.h>
#include "kvmcpu.h"
#include "ls3a_ext_irq.h"
#include "ls7a_irq.h"
#ifdef CONFIG_DEBUG_FS
static int irqchip_state_show(struct seq_file *m, void *v)
{
struct kvm *kvm = m->private;
kvm_get_kvm(kvm);
kvm_dump_ls3a_extirq_state(m, kvm->arch.v_extirq);
kvm_dump_ls7a_ioapic_state(m, kvm->arch.v_ioapic);
kvm_put_kvm(kvm);
return 0;
}
static int irqchip_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, irqchip_state_show, inode->i_private);
}
static const struct file_operations irqchip_debug_fops = {
.open = irqchip_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void irqchip_debug_init(struct kvm *kvm)
{
debugfs_create_file("irqchip-state", 0444, kvm->debugfs_dentry, kvm,
&irqchip_debug_fops);
}
#else
void irqchip_debug_init(struct kvm *kvm) {}
#endif
void irqchip_debug_destroy(struct kvm *kvm)
{
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __LS3A_KVM_EXT_IRQ_H
#define __LS3A_KVM_EXT_IRQ_H
#include <linux/mm_types.h>
#include <linux/hrtimer.h>
#include <linux/kvm_host.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <kvm/iodev.h>
#define IOCSR_EXTIOI_ADDR KVM_IOCSR_EXTIOI_NODEMAP_BASE
#define EXTIOI_ADDR_OFF 0x10000
#define EXTIOI_REG_BASE (LOONGSON_VIRT_REG_BASE + EXTIOI_ADDR_OFF)
#define EXTIOI_REG_END (EXTIOI_REG_BASE + 0x20000)
#define EXTIOI_ADDR_SIZE (EXTIOI_REG_END - EXTIOI_REG_BASE)
#define EXTIOI_PERCORE_REG_OFF 0x10000
#define EXTIOI_PERCORE_REG_END (EXTIOI_PERCORE_REG_OFF + 0x10000)
#define EXTIOI_ADDR(off) (EXTIOI_REG_BASE + (off) - IOCSR_EXTIOI_ADDR)
#define EXTIOI_PERCORE_ADDR(id, off) \
(EXTIOI_REG_BASE + EXTIOI_PERCORE_REG_OFF + ((id) << 8) + (off))
#define EXTIOI_NODETYPE_START (KVM_IOCSR_EXTIOI_NODEMAP_BASE - IOCSR_EXTIOI_ADDR)
#define EXTIOI_NODETYPE_END (EXTIOI_NODETYPE_START + 0x20)
#define EXTIOI_IPMAP_START (KVM_IOCSR_EXTIOI_IPMAP_BASE - IOCSR_EXTIOI_ADDR)
#define EXTIOI_IPMAP_END (EXTIOI_IPMAP_START + 0x8)
#define EXTIOI_ENABLE_START (KVM_IOCSR_EXTIOI_EN_BASE - IOCSR_EXTIOI_ADDR)
#define EXTIOI_ENABLE_END (EXTIOI_ENABLE_START + 0x20)
#define EXTIOI_BOUNCE_START (KVM_IOCSR_EXTIOI_BOUNCE_BASE - IOCSR_EXTIOI_ADDR)
#define EXTIOI_BOUNCE_END (EXTIOI_BOUNCE_START + 0x20)
#define EXTIOI_ISR_START (0x1700 - IOCSR_EXTIOI_ADDR)
#define EXTIOI_ISR_END (EXTIOI_ISR_START + 0x20)
#define EXTIOI_COREMAP_START (KVM_IOCSR_EXTIOI_ROUTE_BASE - IOCSR_EXTIOI_ADDR)
#define EXTIOI_COREMAP_END (EXTIOI_COREMAP_START + 0x100)
#define EXTIOI_COREISR_START (EXTIOI_PERCORE_REG_OFF)
#define EXTIOI_COREISR_END (EXTIOI_PERCORE_REG_END)
#define LS3A_INTC_IP 8
#define EXTIOI_IRQS KVM_EXTIOI_IRQS
#define EXTIOI_IRQS_BITMAP_SIZE (EXTIOI_IRQS / 8)
/* map to ipnum per 32 irqs */
#define EXTIOI_IRQS_IPMAP_SIZE (EXTIOI_IRQS / 32)
#define EXTIOI_IRQS_PER_GROUP KVM_EXTIOI_IRQS_PER_GROUP
#define EXTIOI_IRQS_COREMAP_SIZE (EXTIOI_IRQS)
#define EXTIOI_IRQS_NODETYPE_SIZE KVM_EXTIOI_IRQS_NODETYPE_SIZE
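/*
 * Software state of the extended I/O interrupt controller (EXTIOI):
 * enable, bounce and in-service bitmaps for EXTIOI_IRQS irqs, per-vcpu
 * core in-service bits, and the ipmap/coremap routing registers. The
 * unions let the MMIO emulation access each register at different widths.
 */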
typedef struct kvm_ls3a_extirq_state {
union ext_en {
uint64_t reg_u64[EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[EXTIOI_IRQS_BITMAP_SIZE];
} ext_en;
union bounce {
uint64_t reg_u64[EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[EXTIOI_IRQS_BITMAP_SIZE];
} bounce;
union ext_isr {
uint64_t reg_u64[EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[EXTIOI_IRQS_BITMAP_SIZE];
} ext_isr;
union ext_core_isr {
uint64_t reg_u64[KVM_MAX_VCPUS][EXTIOI_IRQS_BITMAP_SIZE / 8];
uint32_t reg_u32[KVM_MAX_VCPUS][EXTIOI_IRQS_BITMAP_SIZE / 4];
uint8_t reg_u8[KVM_MAX_VCPUS][EXTIOI_IRQS_BITMAP_SIZE];
} ext_core_isr;
union ip_map {
uint64_t reg_u64;
uint32_t reg_u32[EXTIOI_IRQS_IPMAP_SIZE / 4];
uint8_t reg_u8[EXTIOI_IRQS_IPMAP_SIZE];
} ip_map;
union core_map {
uint64_t reg_u64[EXTIOI_IRQS_COREMAP_SIZE / 8];
uint32_t reg_u32[EXTIOI_IRQS_COREMAP_SIZE / 4];
uint8_t reg_u8[EXTIOI_IRQS_COREMAP_SIZE];
} core_map;
union {
uint64_t reg_u64[EXTIOI_IRQS_NODETYPE_SIZE / 4];
uint32_t reg_u32[EXTIOI_IRQS_NODETYPE_SIZE / 2];
uint16_t reg_u16[EXTIOI_IRQS_NODETYPE_SIZE];
uint8_t reg_u8[EXTIOI_IRQS_NODETYPE_SIZE * 2];
} node_type;
/* software state */
uint8_t ext_sw_ipmap[EXTIOI_IRQS];
uint8_t ext_sw_coremap[EXTIOI_IRQS];
uint8_t ext_sw_ipisr[KVM_MAX_VCPUS][LS3A_INTC_IP][EXTIOI_IRQS_BITMAP_SIZE];
} LS3AExtirqState;
struct ls3a_kvm_extirq {
spinlock_t lock;
struct kvm *kvm;
atomic64_t enabled;
struct kvm_io_device device;
struct kvm_ls3a_extirq_state ls3a_ext_irq;
};
static inline struct ls3a_kvm_extirq *ls3a_ext_irqchip(struct kvm *kvm)
{
return kvm->arch.v_extirq;
}
static inline int ls3a_extirq_in_kernel(struct kvm *kvm)
{
int ret;
ret = (ls3a_ext_irqchip(kvm) != NULL);
return ret;
}
void ext_irq_handler(struct kvm *kvm, int irq, int level);
int kvm_create_ls3a_ext_irq(struct kvm *kvm);
int kvm_get_ls3a_extirq(struct kvm *kvm,
struct kvm_loongarch_ls3a_extirq_state *state);
int kvm_set_ls3a_extirq(struct kvm *kvm,
struct kvm_loongarch_ls3a_extirq_state *state);
void kvm_destroy_ls3a_ext_irq(struct kvm *kvm);
void msi_irq_handler(struct kvm *kvm, int irq, int level);
int kvm_setup_ls3a_extirq(struct kvm *kvm);
int kvm_enable_ls3a_extirq(struct kvm *kvm, bool enable);
void kvm_dump_ls3a_extirq_state(struct seq_file *m, struct ls3a_kvm_extirq *irqchip);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include "kvmcpu.h"
#include "ls3a_ipi.h"
#include "ls7a_irq.h"
#include "ls3a_ext_irq.h"
#define ls3a_gipi_lock(s, flags) spin_lock_irqsave(&s->lock, flags)
#define ls3a_gipi_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags)
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_loongarch_interrupt *irq);
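/*
 * Deliver an IPI to @cpu on behalf of the pv IPI hypercall: set the action
 * bits in the target core's status word and raise LARCH_INT_IPI on the
 * target vcpu when no IPI was already pending there.
 */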
int kvm_helper_send_ipi(struct kvm_vcpu *vcpu, unsigned int cpu, unsigned int action)
{
struct kvm *kvm = vcpu->kvm;
struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm);
gipiState *s = &(ipi->ls3a_gipistate);
unsigned long flags;
struct kvm_loongarch_interrupt irq;
kvm->stat.pip_write_exits++;
ls3a_gipi_lock(ipi, flags);
if (s->core[cpu].status == 0) {
irq.cpu = cpu;
irq.irq = LARCH_INT_IPI;
kvm_vcpu_ioctl_interrupt(kvm->vcpus[cpu], &irq);
}
s->core[cpu].status |= action;
ls3a_gipi_unlock(ipi, flags);
return 0;
}
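/*
 * Emulate a write to the per-core IPI register block. Bits 15:8 of the
 * address select the target core; the low byte selects the register
 * (status/enable/set/clear, mailbox buffers at 0x20-0x3c, and the
 * CORE0_IPI_SEND/CORE0_MAIL_SEND doorbells, which carry the destination
 * cpu in the written data).
 */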
static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr,
int len, const void *val)
{
uint64_t data, offset;
struct kvm_loongarch_interrupt irq;
gipiState *s = &(ipi->ls3a_gipistate);
uint32_t cpu, action_data;
struct kvm *kvm;
void *pbuf;
int mailbox, action;
kvm = ipi->kvm;
cpu = (addr >> 8) & 0xff;
data = *(uint64_t *)val;
offset = addr & 0xFF;
BUG_ON(offset & (len - 1));
switch (offset) {
case CORE0_STATUS_OFF:
printk("CORE0_SET_OFF Can't be write\n");
break;
case CORE0_EN_OFF:
s->core[cpu].en = data;
break;
case CORE0_IPI_SEND:
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
action = (data & 0x1f);
action_data = (1 << action);
if (s->core[cpu].status == 0) {
irq.cpu = cpu;
irq.irq = LARCH_INT_IPI;
if (likely(kvm->vcpus[cpu])) {
kvm_vcpu_ioctl_interrupt(kvm->vcpus[cpu], &irq);
}
}
s->core[cpu].status |= action_data;
break;
case CORE0_SET_OFF:
pr_info("CORE0_SET_OFF simulation is required\n");
break;
case CORE0_CLEAR_OFF:
s->core[cpu].status &= ~data;
if (!s->core[cpu].status) {
irq.cpu = cpu;
irq.irq = -LARCH_INT_IPI;
if (likely(kvm->vcpus[cpu]))
kvm_vcpu_ioctl_interrupt(kvm->vcpus[cpu], &irq);
else
kvm_err("Failed lower ipi irq target cpu:%d\n", cpu);
}
break;
case CORE0_MAIL_SEND:
cpu = ((data & 0xffffffff) >> 16) & 0x3ff;
mailbox = ((data & 0xffffffff) >> 2) & 0x7;
pbuf = (void *)s->core[cpu].buf + mailbox * 4;
*(unsigned int *)pbuf = (unsigned int)(data >> 32);
break;
case 0x20 ... 0x3c:
pbuf = (void *)s->core[cpu].buf + (offset - 0x20);
if (len == 1)
*(unsigned char *)pbuf = (unsigned char)data;
else if (len == 2)
*(unsigned short *)pbuf = (unsigned short)data;
else if (len == 4)
*(unsigned int *)pbuf = (unsigned int)data;
else if (len == 8)
*(unsigned long *)pbuf = (unsigned long)data;
break;
default:
printk("ls3a_gipi_writel with unknown addr %llx \n", addr);
break;
}
return 0;
}
static uint64_t ls3a_gipi_readl(struct ls3a_kvm_ipi *ipi,
gpa_t addr, int len, void *val)
{
uint64_t offset;
uint64_t ret = 0;
gipiState *s = &(ipi->ls3a_gipistate);
uint32_t cpu;
void *pbuf;
cpu = (addr >> 8) & 0xff;
offset = addr & 0xFF;
BUG_ON(offset & (len - 1));
switch (offset) {
case CORE0_STATUS_OFF:
ret = s->core[cpu].status;
break;
case CORE0_EN_OFF:
ret = s->core[cpu].en;
break;
case CORE0_SET_OFF:
ret = 0;
break;
case CORE0_CLEAR_OFF:
ret = 0;
break;
case 0x20 ... 0x3c:
pbuf = (void *)s->core[cpu].buf + (offset - 0x20);
if (len == 1)
ret = *(unsigned char *)pbuf;
else if (len == 2)
ret = *(unsigned short *)pbuf;
else if (len == 4)
ret = *(unsigned int *)pbuf;
else if (len == 8)
ret = *(unsigned long *)pbuf;
break;
default:
printk("ls3a_gipi_readl with unknown addr %llx \n", addr);
break;
}
*(uint64_t *)val = ret;
return ret;
}
static int kvm_ls3a_ipi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
struct ls3a_kvm_ipi *ipi;
ipi_io_device *ipi_device;
unsigned long flags;
ipi_device = container_of(dev, ipi_io_device, device);
ipi = ipi_device->ipi;
ipi->kvm->stat.pip_write_exits++;
ls3a_gipi_lock(ipi, flags);
ls3a_gipi_writel(ipi, addr, len, val);
ls3a_gipi_unlock(ipi, flags);
return 0;
}
static int kvm_ls3a_ipi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
struct ls3a_kvm_ipi *ipi;
ipi_io_device *ipi_device;
unsigned long flags;
ipi_device = container_of(dev, ipi_io_device, device);
ipi = ipi_device->ipi;
ipi->kvm->stat.pip_read_exits++;
ls3a_gipi_lock(ipi, flags);
ls3a_gipi_readl(ipi, addr, len, val);
ls3a_gipi_unlock(ipi, flags);
return 0;
}
static const struct kvm_io_device_ops kvm_ls3a_ipi_ops = {
.read = kvm_ls3a_ipi_read,
.write = kvm_ls3a_ipi_write,
};
void kvm_destroy_ls3a_ipi(struct kvm *kvm)
{
struct kvm_io_device *device;
struct ls3a_kvm_ipi *vipi = kvm->arch.v_gipi;
if (!vipi)
return;
device = &vipi->dev_ls3a_ipi.device;
kvm_io_bus_unregister_dev(vipi->kvm, KVM_MMIO_BUS, device);
kfree(vipi);
}
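/*
 * Create the in-kernel IPI device and register it on the MMIO bus at
 * SMP_MAILBOX, covering KVM_IOCSR_IPI_ADDR_SIZE bytes of guest address
 * space.
 */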
int kvm_create_ls3a_ipi(struct kvm *kvm)
{
struct ls3a_kvm_ipi *s;
unsigned long addr;
struct kvm_io_device *device;
int ret;
s = kzalloc(sizeof(struct ls3a_kvm_ipi), GFP_KERNEL);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
s->kvm = kvm;
/*
* Initialize MMIO device
*/
device = &s->dev_ls3a_ipi.device;
kvm_iodevice_init(device, &kvm_ls3a_ipi_ops);
addr = SMP_MAILBOX;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
addr, KVM_IOCSR_IPI_ADDR_SIZE, device);
mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kvm_err("%s Initialize MMIO dev err ret:%d\n", __func__, ret);
goto err;
} else {
s->dev_ls3a_ipi.ipi = s;
}
kvm->arch.v_gipi = s;
return 0;
err:
kfree(s);
return -EFAULT;
}
int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state)
{
struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm);
gipiState *ipi_state = &(ipi->ls3a_gipistate);
unsigned long flags;
ls3a_gipi_lock(ipi, flags);
memcpy(state, ipi_state, sizeof(gipiState));
ls3a_gipi_unlock(ipi, flags);
return 0;
}
int kvm_set_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state)
{
struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm);
gipiState *ipi_state = &(ipi->ls3a_gipistate);
unsigned long flags;
if (!ipi)
return -EINVAL;
ls3a_gipi_lock(ipi, flags);
memcpy(ipi_state, state, sizeof(gipiState));
ls3a_gipi_unlock(ipi, flags);
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __LS3A_KVM_IPI_H
#define __LS3A_KVM_IPI_H
#include <linux/mm_types.h>
#include <linux/hrtimer.h>
#include <linux/kvm_host.h>
#include <linux/spinlock.h>
#include <kvm/iodev.h>
typedef struct gipi_single {
uint32_t status;
uint32_t en;
uint32_t set;
uint32_t clear;
uint64_t buf[4];
} gipi_single;
typedef struct gipiState {
gipi_single core[KVM_MAX_VCPUS];
} gipiState;
struct ls3a_kvm_ipi;
typedef struct ipi_io_device {
struct ls3a_kvm_ipi *ipi;
struct kvm_io_device device;
int nodeNum;
} ipi_io_device;
struct ls3a_kvm_ipi {
spinlock_t lock;
struct kvm *kvm;
gipiState ls3a_gipistate;
int nodeNum;
ipi_io_device dev_ls3a_ipi;
};
#define SMP_MAILBOX (LOONGSON_VIRT_REG_BASE + 0x0000)
#define KVM_IPI_REG_ADDRESS(id, off) (SMP_MAILBOX | (id << 8) | off)
#define KVM_IOCSR_IPI_ADDR_SIZE 0x10000
#define CORE0_STATUS_OFF 0x000
#define CORE0_EN_OFF 0x004
#define CORE0_SET_OFF 0x008
#define CORE0_CLEAR_OFF 0x00c
#define CORE0_BUF_20 0x020
#define CORE0_BUF_28 0x028
#define CORE0_BUF_30 0x030
#define CORE0_BUF_38 0x038
#define CORE0_IPI_SEND 0x040
#define CORE0_MAIL_SEND 0x048
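/*
 * Each vcpu owns a 256-byte register window at SMP_MAILBOX + (cpu << 8);
 * the offsets above are relative to that window (see KVM_IPI_REG_ADDRESS).
 */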
static inline struct ls3a_kvm_ipi *ls3a_ipi_irqchip(struct kvm *kvm)
{
return kvm->arch.v_gipi;
}
static inline int ls3a_ipi_in_kernel(struct kvm *kvm)
{
int ret;
ret = (ls3a_ipi_irqchip(kvm) != NULL);
return ret;
}
int kvm_create_ls3a_ipi(struct kvm *kvm);
void kvm_destroy_ls3a_ipi(struct kvm *kvm);
int kvm_set_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state);
int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state);
int kvm_helper_send_ipi(struct kvm_vcpu *vcpu, unsigned int cpu, unsigned int action);
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/highmem.h>
#include <linux/mm.h>
#include "ls3a_ipi.h"
#include "ls7a_irq.h"
#include "ls3a_ext_irq.h"
void ls7a_ioapic_lock(struct ls7a_kvm_ioapic *s, unsigned long *flags)
{
unsigned long tmp;
spin_lock_irqsave(&s->lock, tmp);
*flags = tmp;
}
void ls7a_ioapic_unlock(struct ls7a_kvm_ioapic *s, unsigned long *flags)
{
unsigned long tmp;
tmp = *flags;
spin_unlock_irqrestore(&s->lock, tmp);
}
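/*
 * Assert the interrupt lines in @mask that are pending, unmasked and not
 * yet in service, and forward each one to the extended interrupt
 * controller through msi_irq_handler() using its htmsi_vector routing
 * entry.
 */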
static void kvm_ls7a_ioapic_raise(struct kvm *kvm, unsigned long mask)
{
unsigned long irqnum, val;
struct ls7a_kvm_ioapic *s = ls7a_ioapic_irqchip(kvm);
struct kvm_ls7a_ioapic_state *state;
struct kvm_loongarch_interrupt irq;
int i;
state = &s->ls7a_ioapic;
irq.cpu = -1;
val = mask & state->intirr & (~state->int_mask);
val &= ~state->intisr;
for_each_set_bit(i, &val, 64) {
state->intisr |= 0x1ULL << i;
irqnum = state->htmsi_vector[i];
kvm_debug("msi_irq_handler,%ld,up\n", irqnum);
msi_irq_handler(kvm, irqnum, 1);
}
kvm->stat.ls7a_ioapic_update++;
}
static void kvm_ls7a_ioapic_lower(struct kvm *kvm, unsigned long mask)
{
unsigned long irqnum, val;
struct ls7a_kvm_ioapic *s = ls7a_ioapic_irqchip(kvm);
struct kvm_ls7a_ioapic_state *state;
struct kvm_loongarch_interrupt irq;
int i;
state = &s->ls7a_ioapic;
irq.cpu = -1;
val = mask & state->intisr;
for_each_set_bit(i, &val, 64) {
state->intisr &= ~(0x1ULL << i);
irqnum = state->htmsi_vector[i];
kvm_debug("msi_irq_handler,%ld,down\n", irqnum);
msi_irq_handler(kvm, irqnum, 0);
}
kvm->stat.ls7a_ioapic_update++;
}
int kvm_ls7a_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
if (!level)
return -1;
kvm_debug("msi data is 0x%x\n", e->msi.data);
msi_irq_handler(kvm, e->msi.data, 1);
return 0;
}
int kvm_ls7a_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
struct kvm_kernel_irq_routing_entry route;
if (msi->flags != 0)
return -EINVAL;
kvm->stat.ls7a_msi_irq++;
route.msi.address_lo = msi->address_lo;
route.msi.address_hi = msi->address_hi;
route.msi.data = msi->data;
kvm_debug("msi data is 0x%x\n", route.msi.data);
return kvm_ls7a_set_msi(&route, kvm,
KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}
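/*
 * Update pin @irq of the LS7A ioapic. Edge-triggered pins latch on a
 * 0->1 transition of the input; level-triggered pins follow @level and
 * are lowered again when it drops.
 */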
int kvm_ls7a_ioapic_set_irq(struct kvm *kvm, int irq, int level)
{
struct ls7a_kvm_ioapic *s;
struct kvm_ls7a_ioapic_state *state;
uint64_t mask = 1ULL << irq;
s = ls7a_ioapic_irqchip(kvm);
state = &s->ls7a_ioapic;
BUG_ON(irq < 0 || irq >= LS7A_IOAPIC_NUM_PINS);
if (state->intedge & mask) {
/* edge triggered */
if (level) {
if ((state->last_intirr & mask) == 0) {
state->intirr |= mask;
kvm_ls7a_ioapic_raise(kvm, mask);
}
state->last_intirr |= mask;
} else
state->last_intirr &= ~mask;
} else {
/* level triggered */
if (!!level) {
if ((state->intirr & mask) == 0) {
state->intirr |= mask;
kvm_ls7a_ioapic_raise(kvm, mask);
}
} else {
if (state->intirr & mask) {
state->intirr &= ~mask;
kvm_ls7a_ioapic_lower(kvm, mask);
}
}
}
kvm->stat.ls7a_ioapic_set_irq++;
return 0;
}
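/*
 * MMIO write emulation for the LS7A ioapic registers: 8-byte accesses hit
 * the control registers (mask/status/edge/clear/polarity/htmsi enable),
 * while single-byte accesses program the per-pin htmsi_vector and
 * route_entry tables.
 */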
static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s,
gpa_t addr, int len, const void *val)
{
struct kvm *kvm;
struct kvm_ls7a_ioapic_state *state;
int64_t offset_tmp;
uint64_t offset;
uint64_t data, old;
offset = addr & 0xfff;
kvm = s->kvm;
state = &(s->ls7a_ioapic);
if (offset & (len - 1)) {
printk("%s(%d):unaligned address access %llx size %d \n",
__FUNCTION__, __LINE__, addr, len);
return 0;
}
if (8 == len) {
data = *(uint64_t *)val;
switch (offset) {
case LS7A_INT_MASK_OFFSET:
old = state->int_mask;
state->int_mask = data;
if (old & ~data)
kvm_ls7a_ioapic_raise(kvm, old & ~data);
if (~old & data)
kvm_ls7a_ioapic_lower(kvm, ~old & data);
break;
case LS7A_INT_STATUS_OFFSET:
state->intisr = data;
break;
case LS7A_INT_EDGE_OFFSET:
state->intedge = data;
break;
case LS7A_INT_CLEAR_OFFSET:
/*
* only clear edge triggered irq on writing INTCLR reg
* no effect on level triggered irq
*/
data = data & state->intedge;
state->intirr &= ~data;
kvm_ls7a_ioapic_lower(kvm, data);
state->intisr &= (~data);
break;
case LS7A_INT_POL_OFFSET:
state->int_polarity = data;
break;
case LS7A_HTMSI_EN_OFFSET:
state->htmsi_en = data;
break;
case LS7A_AUTO_CTRL0_OFFSET:
case LS7A_AUTO_CTRL1_OFFSET:
break;
default:
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
break;
}
} else if (1 == len) {
data = *(unsigned char *)val;
if (offset >= LS7A_HTMSI_VEC_OFFSET) {
offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET;
if (offset_tmp >= 0 && offset_tmp < 64) {
state->htmsi_vector[offset_tmp] =
(uint8_t)(data & 0xff);
}
} else if (offset >= LS7A_ROUTE_ENTRY_OFFSET) {
offset_tmp = offset - LS7A_ROUTE_ENTRY_OFFSET;
if (offset_tmp >= 0 && offset_tmp < 64) {
state->route_entry[offset_tmp] =
(uint8_t)(data & 0xff);
}
} else {
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
}
} else {
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
}
kvm->stat.ioapic_reg_write++;
return 0;
}
static inline struct ls7a_kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
return container_of(dev, struct ls7a_kvm_ioapic, dev_ls7a_ioapic);
}
static int kvm_ls7a_ioapic_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr, int len, const void *val)
{
struct ls7a_kvm_ioapic *s = to_ioapic(this);
unsigned long flags;
ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
ls7a_ioapic_reg_write(s, addr, len, val);
ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
return 0;
}
static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s,
gpa_t addr, int len, void *val)
{
uint64_t offset, offset_tmp;
struct kvm *kvm;
struct kvm_ls7a_ioapic_state *state;
uint64_t result = 0;
state = &(s->ls7a_ioapic);
kvm = s->kvm;
offset = addr & 0xfff;
if (offset & (len - 1)) {
printk("%s(%d):unaligned address access %llx size %d \n",
__FUNCTION__, __LINE__, addr, len);
return 0;
}
if (8 == len) {
switch (offset) {
case LS7A_INT_MASK_OFFSET:
result = state->int_mask;
break;
case LS7A_INT_STATUS_OFFSET:
result = state->intisr & (~state->int_mask);
break;
case LS7A_INT_EDGE_OFFSET:
result = state->intedge;
break;
case LS7A_INT_POL_OFFSET:
result = state->int_polarity;
break;
case LS7A_HTMSI_EN_OFFSET:
result = state->htmsi_en;
break;
case LS7A_AUTO_CTRL0_OFFSET:
case LS7A_AUTO_CTRL1_OFFSET:
break;
case LS7A_INT_ID_OFFSET:
result = LS7A_INT_ID_VER;
result = (result << 32) + LS7A_INT_ID_VAL;
break;
default:
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
break;
}
if (val != NULL)
*(uint64_t *)val = result;
} else if (1 == len) {
if (offset >= LS7A_HTMSI_VEC_OFFSET) {
offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET;
if (offset_tmp >= 0 && offset_tmp < 64) {
result = state->htmsi_vector[offset_tmp];
}
} else if (offset >= LS7A_ROUTE_ENTRY_OFFSET) {
offset_tmp = offset - LS7A_ROUTE_ENTRY_OFFSET;
if (offset_tmp >= 0 && offset_tmp < 64) {
result = state->route_entry[offset_tmp];
}
} else {
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
}
if (val != NULL)
*(unsigned char *)val = result;
} else {
WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len);
}
kvm->stat.ioapic_reg_read++;
return result;
}
static int kvm_ls7a_ioapic_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr, int len, void *val)
{
struct ls7a_kvm_ioapic *s = to_ioapic(this);
unsigned long flags;
uint64_t result = 0;
ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
result = ls7a_ioapic_reg_read(s, addr, len, val);
ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
return 0;
}
static const struct kvm_io_device_ops kvm_ls7a_ioapic_ops = {
.read = kvm_ls7a_ioapic_read,
.write = kvm_ls7a_ioapic_write,
};
static int kvm_ls7a_ioapic_alias_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *this, gpa_t addr, int len, void *val)
{
struct ls7a_kvm_ioapic *s;
unsigned long flags;
s = container_of(this, struct ls7a_kvm_ioapic, ls7a_ioapic_alias);
ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
ls7a_ioapic_reg_read(s, addr, len, val);
ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
return 0;
}
static int kvm_ls7a_ioapic_alias_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *this, gpa_t addr, int len, const void *val)
{
struct ls7a_kvm_ioapic *s;
unsigned long flags;
s = container_of(this, struct ls7a_kvm_ioapic, ls7a_ioapic_alias);
ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
ls7a_ioapic_reg_write(s, addr, len, val);
ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
return 0;
}
static const struct kvm_io_device_ops kvm_ls7a_ioapic_ops_alias = {
.read = kvm_ls7a_ioapic_alias_read,
.write = kvm_ls7a_ioapic_alias_write,
};
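/*
 * Create the in-kernel LS7A ioapic and register its 4KB register window
 * twice on the MMIO bus: once at LS7A_IOAPIC_GUEST_REG_BASE and once at
 * the alias base, both backed by the same state.
 */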
int kvm_create_ls7a_ioapic(struct kvm *kvm)
{
struct ls7a_kvm_ioapic *s;
int ret;
unsigned long ls7a_ioapic_reg_base;
s = kzalloc(sizeof(struct ls7a_kvm_ioapic), GFP_KERNEL);
if (!s)
return -ENOMEM;
spin_lock_init(&s->lock);
s->kvm = kvm;
ls7a_ioapic_reg_base = LS7A_IOAPIC_GUEST_REG_BASE;
/*
* Initialize MMIO device
*/
kvm_iodevice_init(&s->dev_ls7a_ioapic, &kvm_ls7a_ioapic_ops);
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ls7a_ioapic_reg_base,
0x1000, &s->dev_ls7a_ioapic);
if (ret < 0) {
kvm_err("Failed register ioapic, err:%d\n", ret);
goto fail_unlock;
}
ls7a_ioapic_reg_base = LS7A_IOAPIC_GUEST_REG_BASE_ALIAS;
kvm_iodevice_init(&s->ls7a_ioapic_alias, &kvm_ls7a_ioapic_ops_alias);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ls7a_ioapic_reg_base,
0x1000, &s->ls7a_ioapic_alias);
if (ret < 0) {
kvm_err("Failed register alias ioapic, err:%d\n", ret);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
&s->dev_ls7a_ioapic);
goto fail_unlock;
}
mutex_unlock(&kvm->slots_lock);
kvm->arch.v_ioapic = s;
return 0;
fail_unlock:
mutex_unlock(&kvm->slots_lock);
kfree(s);
return -EFAULT;
}
int kvm_get_ls7a_ioapic(struct kvm *kvm, struct ls7a_ioapic_state *state)
{
struct ls7a_kvm_ioapic *ls7a_ioapic = ls7a_ioapic_irqchip(kvm);
struct kvm_ls7a_ioapic_state *ioapic_state =
&(ls7a_ioapic->ls7a_ioapic);
unsigned long flags;
ls7a_ioapic_lock(ls7a_ioapic, &flags);
memcpy(state, ioapic_state, sizeof(struct kvm_ls7a_ioapic_state));
ls7a_ioapic_unlock(ls7a_ioapic, &flags);
kvm->stat.get_ls7a_ioapic++;
return 0;
}
int kvm_set_ls7a_ioapic(struct kvm *kvm, struct ls7a_ioapic_state *state)
{
struct ls7a_kvm_ioapic *ls7a_ioapic = ls7a_ioapic_irqchip(kvm);
struct kvm_ls7a_ioapic_state *ioapic_state =
&(ls7a_ioapic->ls7a_ioapic);
unsigned long flags;
if (!ls7a_ioapic)
return -EINVAL;
ls7a_ioapic_lock(ls7a_ioapic, &flags);
memcpy(ioapic_state, state, sizeof(struct kvm_ls7a_ioapic_state));
ls7a_ioapic_unlock(ls7a_ioapic, &flags);
kvm->stat.set_ls7a_ioapic++;
return 0;
}
void kvm_destroy_ls7a_ioapic(struct kvm *kvm)
{
struct ls7a_kvm_ioapic *vpic = kvm->arch.v_ioapic;
if (!vpic)
return;
kvm_io_bus_unregister_dev(vpic->kvm, KVM_MMIO_BUS,
&vpic->ls7a_ioapic_alias);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_MMIO_BUS,
&vpic->dev_ls7a_ioapic);
kfree(vpic);
}
void kvm_dump_ls7a_ioapic_state(struct seq_file *m,
struct ls7a_kvm_ioapic *ioapic)
{
struct kvm_ls7a_ioapic_state *ioapic_state;
unsigned long flags;
int i = 0;
if (!ioapic)
return;
seq_puts(m, "\nIOAPIC state:\n");
ioapic_state = &(ioapic->ls7a_ioapic);
ls7a_ioapic_lock(ioapic, &flags);
seq_puts(m, "irq masked: ");
for (i = 0; i < 64; i++) {
if (!test_bit(i, (void *)&ioapic_state->int_mask))
seq_printf(m, "%d ", i);
}
seq_printf(m, "\nhtmsi_en:0x%016llx\n"
"intedge:0x%016llx",
ioapic_state->htmsi_en,
ioapic_state->intedge);
seq_puts(m, "\nroute_entry: ");
for (i = 0; i < 64; i++)
seq_printf(m, "%d ", ioapic_state->route_entry[i]);
seq_puts(m, "\nhtmsi_vector: ");
for (i = 0; i < 64; i++)
seq_printf(m, "%d ", ioapic_state->htmsi_vector[i]);
seq_printf(m, "\nintirr:%016llx\n"
"intisr:%016llx\n",
ioapic_state->intirr,
ioapic_state->intisr);
ls7a_ioapic_unlock(ioapic, &flags);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __LOONGARCH_KVM_IRQ_H__
#define __LOONGARCH_KVM_IRQ_H__
static inline int irqchip_in_kernel(struct kvm *kvm)
{
return kvm->arch.v_ioapic ? 1 : 0;
}
#endif