OpenHarmony / kernel_linux
Commit 9a6d77d5
Authored Mar 24, 2013 by Gleb Natapov

Merge 'git://github.com/agraf/linux-2.6.git kvm-ppc-next' into queue

Parents: 81f4f76b 4fe27d2a
11 changed files with 133 additions and 93 deletions (+133 −93)
Documentation/virtual/kvm/api.txt        +4   -0
arch/powerpc/include/asm/kvm_host.h      +1   -0
arch/powerpc/include/asm/kvm_ppc.h       +1   -2
arch/powerpc/include/uapi/asm/kvm.h      +5   -0
arch/powerpc/kernel/asm-offsets.c        +1   -0
arch/powerpc/kvm/book3s.c                +1   -2
arch/powerpc/kvm/booke.c                 +45  -12
arch/powerpc/kvm/booke_interrupts.S      +39  -3
arch/powerpc/kvm/e500.h                  +8   -16
arch/powerpc/kvm/e500_mmu_host.c         +27  -57
arch/powerpc/kvm/powerpc.c               +1   -1
Documentation/virtual/kvm/api.txt

@@ -1788,6 +1788,10 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_VPA_DTL | 128
   PPC   | KVM_REG_PPC_EPCR | 32
   PPC   | KVM_REG_PPC_EPR | 32
+  PPC   | KVM_REG_PPC_TCR | 32
+  PPC   | KVM_REG_PPC_TSR | 32
+  PPC   | KVM_REG_PPC_OR_TSR | 32
+  PPC   | KVM_REG_PPC_CLEAR_TSR | 32
 
 ARM registers are mapped using the lower 32 bits. The upper 16 of that
 is the register group type, or coprocessor number:
arch/powerpc/include/asm/kvm_host.h

@@ -504,6 +504,7 @@ struct kvm_vcpu_arch {
 	u32 tlbcfg[4];
 	u32 mmucfg;
 	u32 epr;
+	u32 crit_save;
 	struct kvmppc_booke_debug_reg dbg_reg;
 #endif
 	gpa_t paddr_accessed;
arch/powerpc/include/asm/kvm_ppc.h

@@ -104,8 +104,7 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
-extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-                                         struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
arch/powerpc/include/uapi/asm/kvm.h

@@ -417,4 +417,9 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_EPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
 #define KVM_REG_PPC_EPR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
+/* Timer Status Register OR/CLEAR interface */
+#define KVM_REG_PPC_OR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
+#define KVM_REG_PPC_CLEAR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
+#define KVM_REG_PPC_TCR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
+#define KVM_REG_PPC_TSR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)
 
 #endif /* __LINUX_KVM_POWERPC_H */
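Note: the four new IDs use the standard ONE_REG encoding (architecture | size | index) and are driven through the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG vcpu ioctls rather than a new ioctl. A minimal userspace sketch of setting TSR bits through the OR interface, assuming an already-created vcpu fd and a <linux/kvm.h> that carries these defines (the helper name is illustrative, not part of this change):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* OR the given bits into the guest's Timer Status Register. */
static int vcpu_or_tsr_bits(int vcpufd, uint32_t bits)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_OR_TSR,
		.addr = (uintptr_t)&bits,	/* kernel reads a u32 from here */
	};

	return ioctl(vcpufd, KVM_SET_ONE_REG, &reg);
}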
arch/powerpc/kernel/asm-offsets.c

@@ -596,6 +596,7 @@ int main(void)
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+	DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif /* CONFIG_KVM */
arch/powerpc/kvm/book3s.c

@@ -160,8 +160,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_book3s_queue_irqprio(vcpu, vec);
 }
 
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-				  struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
arch/powerpc/kvm/booke.c

@@ -222,8 +222,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_booke_queue_irqprio(vcpu, prio);
 }
 
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-				  struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

@@ -1148,6 +1147,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return r;
 }
 
+static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
+{
+	u32 old_tsr = vcpu->arch.tsr;
+
+	vcpu->arch.tsr = new_tsr;
+
+	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+		arm_next_watchdog(vcpu);
+
+	update_timer_ints(vcpu);
+}
+
 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {

@@ -1287,16 +1298,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 		kvmppc_emulate_dec(vcpu);
 	}
 
-	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
-		u32 old_tsr = vcpu->arch.tsr;
-
-		vcpu->arch.tsr = sregs->u.e.tsr;
-
-		if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
-			arm_next_watchdog(vcpu);
-
-		update_timer_ints(vcpu);
-	}
+	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
+		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
 
 	return 0;
 }

@@ -1438,6 +1441,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
 		break;
 #endif
+	case KVM_REG_PPC_TCR:
+		r = put_user(vcpu->arch.tcr, (u32 __user *)(long)reg->addr);
+		break;
+	case KVM_REG_PPC_TSR:
+		r = put_user(vcpu->arch.tsr, (u32 __user *)(long)reg->addr);
+		break;
 	default:
 		break;
 	}

@@ -1481,6 +1490,30 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		break;
 	}
 #endif
+	case KVM_REG_PPC_OR_TSR: {
+		u32 tsr_bits;
+		r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tsr_bits(vcpu, tsr_bits);
+		break;
+	}
+	case KVM_REG_PPC_CLEAR_TSR: {
+		u32 tsr_bits;
+		r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
+		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
+		break;
+	}
+	case KVM_REG_PPC_TSR: {
+		u32 tsr;
+		r = get_user(tsr, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tsr(vcpu, tsr);
+		break;
+	}
+	case KVM_REG_PPC_TCR: {
+		u32 tcr;
+		r = get_user(tcr, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tcr(vcpu, tcr);
+		break;
+	}
 	default:
 		break;
 	}
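The ONE_REG handling above is what lets a VMM snapshot and restore a booke vcpu's timer/watchdog state, with KVM_REG_PPC_TSR writes funneled through the new kvmppc_set_tsr() so the watchdog is re-armed when TSR_ENW/TSR_WIS change. A rough userspace sketch of such a transfer, assuming source and destination vcpu fds (the helper name is illustrative, not from this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Copy one 32-bit ONE_REG register from one vcpu to another. */
static int copy_u32_reg(int src_fd, int dst_fd, uint64_t id)
{
	uint32_t val;
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

	if (ioctl(src_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	return ioctl(dst_fd, KVM_SET_ONE_REG, &reg);
}

/* e.g. copy_u32_reg(src_vcpu, dst_vcpu, KVM_REG_PPC_TCR);
 *      copy_u32_reg(src_vcpu, dst_vcpu, KVM_REG_PPC_TSR); */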
arch/powerpc/kvm/booke_interrupts.S

@@ -54,8 +54,7 @@
 			(1<<BOOKE_INTERRUPT_DTLB_MISS) | \
 			(1<<BOOKE_INTERRUPT_ALIGNMENT))
 
-.macro KVM_HANDLER ivor_nr scratch srr0
-_GLOBAL(kvmppc_handler_\ivor_nr)
+.macro __KVM_HANDLER ivor_nr scratch srr0
 	/* Get pointer to vcpu and record exit number. */
 	mtspr	\scratch , r4
 	mfspr	r4, SPRN_SPRG_THREAD

@@ -76,6 +75,43 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 	bctr
 .endm
 
+.macro KVM_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+	__KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
+.macro KVM_DBG_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+	mtspr	\scratch, r4
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	stw	r3, VCPU_CRIT_SAVE(r4)
+	mfcr	r3
+	mfspr	r4, SPRN_CSRR1
+	andi.	r4, r4, MSR_PR
+	bne	1f
+	/* debug interrupt happened in enter/exit path */
+	mfspr	r4, SPRN_CSRR1
+	rlwinm	r4, r4, 0, ~MSR_DE
+	mtspr	SPRN_CSRR1, r4
+	lis	r4, 0xffff
+	ori	r4, r4, 0xffff
+	mtspr	SPRN_DBSR, r4
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	mtcr	r3
+	lwz	r3, VCPU_CRIT_SAVE(r4)
+	mfspr	r4, \scratch
+	rfci
+1:	/* debug interrupt happened in guest */
+	mtcr	r3
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	lwz	r3, VCPU_CRIT_SAVE(r4)
+	mfspr	r4, \scratch
+	__KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
 .macro KVM_HANDLER_ADDR ivor_nr
 	.long	kvmppc_handler_\ivor_nr
 .endm

@@ -100,7 +136,7 @@ KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
arch/powerpc/kvm/e500.h

@@ -26,17 +26,20 @@
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
-	pfn_t pfn;
-	unsigned int flags; /* E500_TLB_* */
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
+	unsigned int flags;	/* E500_TLB_* */
 };
 
 struct tlbe_priv {
-	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2

@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now.  If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
 	unsigned int host_tlb1_nv;
 
 	u32 svr;
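The move from plain values (1, 2) to explicit (1 << n) masks above underlines that these flags can now be combined on a single entry, since a guest TLB1 entry may be backed by either host TLB0 or host TLB1. A small illustrative check in that style, assuming e500.h is included (the helper is hypothetical, not part of the patch):

/* Hypothetical helper: a valid guest TLB1 entry whose host mapping is
 * tracked through the TLB1 bitmap rather than host TLB0. */
static inline bool e500_ref_in_host_tlb1(const struct tlbe_ref *ref)
{
	return (ref->flags & E500_TLB_VALID) && (ref->flags & E500_TLB_BITMAP);
}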
arch/powerpc/kvm/e500_mmu_host.c

@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];

@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);

@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}

@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
-	int i;
-
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
+	int tlbsel;
 	int i;
 
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
 	}
-
-	clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }

@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,

@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }

@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;

@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;

@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {

@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 				host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }
arch/powerpc/kvm/powerpc.c

@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	if (irq->irq == KVM_INTERRUPT_UNSET) {
-		kvmppc_core_dequeue_external(vcpu, irq);
+		kvmppc_core_dequeue_external(vcpu);
 		return 0;
 	}