openeuler / raspberrypi-kernel

Commit 69b745ff
Author:  Ingo Molnar
Date:    Feb 05, 2009
Parents: ef3892bd, e4d04071

    Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu

Showing 21 changed files with 1,379 additions and 1,178 deletions.
Changed files (21):

  arch/x86/include/asm/page.h            +1    -2
  arch/x86/include/asm/paravirt.h        +301  -152
  arch/x86/include/asm/pgtable.h         +26   -12
  arch/x86/kernel/entry_64.S             +1    -1
  arch/x86/kernel/paravirt.c             +41   -14
  arch/x86/kernel/paravirt_patch_32.c    +12   -0
  arch/x86/kernel/paravirt_patch_64.c    +15   -0
  arch/x86/kernel/vmi_32.c               +5    -4
  arch/x86/kernel/vsmp_64.c              +8    -4
  arch/x86/lguest/boot.c                 +9    -4
  arch/x86/xen/Makefile                  +2    -1
  arch/x86/xen/enlighten.c               +19   -748
  arch/x86/xen/irq.c                     +10   -4
  arch/x86/xen/mmu.c                     +745  -0
  arch/x86/xen/mmu.h                     +3    -0
  arch/x86/xen/smp.c                     +4    -2
  arch/x86/xen/xen-asm.S                 +140  -0
  arch/x86/xen/xen-asm.h                 +12   -0
  arch/x86/xen/xen-asm_32.S              +12   -99
  arch/x86/xen/xen-asm_64.S              +3    -131
  arch/x86/xen/xen-ops.h                 +10   -0
arch/x86/include/asm/page.h

@@ -147,7 +147,7 @@ static inline pteval_t native_pte_val(pte_t pte)
 	return pte.pte;
 }

-static inline pteval_t native_pte_flags(pte_t pte)
+static inline pteval_t pte_flags(pte_t pte)
 {
 	return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
@@ -173,7 +173,6 @@ static inline pteval_t native_pte_flags(pte_t pte)
 #endif

 #define pte_val(x)	native_pte_val(x)
-#define pte_flags(x)	native_pte_flags(x)
 #define __pte(x)	native_make_pte(x)

 #endif	/* CONFIG_PARAVIRT */
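The change above makes pte_flags() a plain accessor: the flag bits are just the PTE value masked with PTE_FLAGS_MASK, so no paravirt hook is needed to read them. A minimal user-space sketch of that relationship (the types and the mask value below are illustrative stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

/* Illustrative stand-in: low 12 bits plus the top bits hold flags, the
 * middle bits hold the page frame number (the real PTE_FLAGS_MASK is
 * derived from the CPU's physical address width). */
#define PTE_FLAGS_MASK 0xfff0000000000fffULL

static pteval_t pte_val(pte_t p)   { return p.pte; }
static pteval_t pte_flags(pte_t p) { return pte_val(p) & PTE_FLAGS_MASK; }

int main(void)
{
	pte_t pte = { 0x00000000dead0067ULL }; /* frame bits plus a few low flag bits */

	printf("pte_val:   %#llx\n", (unsigned long long)pte_val(pte));
	printf("pte_flags: %#llx\n", (unsigned long long)pte_flags(pte));
	return 0;
}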
arch/x86/include/asm/paravirt.h

(This diff is collapsed in the web view and is not shown here; +301 -152.)
arch/x86/include/asm/pgtable.h

@@ -240,64 +240,78 @@ static inline int pmd_large(pmd_t pte)
 		(_PAGE_PSE | _PAGE_PRESENT);
 }

+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+	pteval_t v = native_pte_val(pte);
+
+	return native_make_pte(v & ~clear);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+	return pte_clear_flags(pte, _PAGE_DIRTY);
 }

 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+	return pte_clear_flags(pte, _PAGE_ACCESSED);
 }

 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_RW);
+	return pte_clear_flags(pte, _PAGE_RW);
 }

 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_NX);
+	return pte_clear_flags(pte, _PAGE_NX);
 }

 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_DIRTY);
+	return pte_set_flags(pte, _PAGE_DIRTY);
 }

 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+	return pte_set_flags(pte, _PAGE_ACCESSED);
 }

 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_RW);
+	return pte_set_flags(pte, _PAGE_RW);
 }

 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_PSE);
+	return pte_set_flags(pte, _PAGE_PSE);
 }

 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_PSE);
+	return pte_clear_flags(pte, _PAGE_PSE);
 }

 static inline pte_t pte_mkglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+	return pte_set_flags(pte, _PAGE_GLOBAL);
 }

 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+	return pte_clear_flags(pte, _PAGE_GLOBAL);
 }

 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return __pte(pte_val(pte) | _PAGE_SPECIAL);
+	return pte_set_flags(pte, _PAGE_SPECIAL);
 }

 extern pteval_t __supported_pte_mask;
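pte_set_flags()/pte_clear_flags() fold every pte_mk*/pte_clr* helper into two primitives that operate on the native PTE value. A self-contained sketch of the same shape (types and flag values are stand-ins for the real pteval_t and _PAGE_* definitions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

/* Illustrative flag values (bit positions match x86, but treat them as
 * stand-ins for the real _PAGE_* definitions). */
#define _PAGE_RW    (1ULL << 1)
#define _PAGE_DIRTY (1ULL << 6)

static pte_t    make_pte(pteval_t v) { return (pte_t){ v }; }
static pteval_t pte_val(pte_t p)     { return p.pte; }

/* Same shape as the helpers added above: one place ORs flags in, one
 * place masks them out, so each pte_mkX / pte_clrX helper becomes a
 * one-liner. */
static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	return make_pte(pte_val(pte) | set);
}

static pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	return make_pte(pte_val(pte) & ~clear);
}

int main(void)
{
	pte_t pte = make_pte(_PAGE_RW);

	pte = pte_set_flags(pte, _PAGE_DIRTY);  /* like pte_mkdirty()   */
	pte = pte_clear_flags(pte, _PAGE_RW);   /* like pte_wrprotect() */

	printf("flags now: %#llx\n", (unsigned long long)pte_val(pte));
	return 0;
}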
arch/x86/kernel/entry_64.S

@@ -1140,7 +1140,7 @@ ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
-	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
 gs_change:
 	movl %edi,%gs
arch/x86/kernel/paravirt.c

@@ -44,6 +44,17 @@ void _paravirt_nop(void)
 {
 }

+/* identity function, which can be inlined */
+u32 _paravirt_ident_32(u32 x)
+{
+	return x;
+}
+
+u64 _paravirt_ident_64(u64 x)
+{
+	return x;
+}
+
 static void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",

@@ -138,9 +149,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 	if (opfunc == NULL)
 		/* If there's no function, patch it with a ud2a (BUG) */
 		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-	else if (opfunc == paravirt_nop)
+	else if (opfunc == _paravirt_nop)
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
+
+	/* identity functions just return their single argument */
+	else if (opfunc == _paravirt_ident_32)
+		ret = paravirt_patch_ident_32(insnbuf, len);
+	else if (opfunc == _paravirt_ident_64)
+		ret = paravirt_patch_ident_64(insnbuf, len);
+
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||

@@ -292,10 +310,10 @@ struct pv_time_ops pv_time_ops = {
 struct pv_irq_ops pv_irq_ops = {
 	.init_IRQ = native_init_IRQ,
-	.save_fl = native_save_fl,
-	.restore_fl = native_restore_fl,
-	.irq_disable = native_irq_disable,
-	.irq_enable = native_irq_enable,
+	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
 	.safe_halt = native_safe_halt,
 	.halt = native_halt,
 #ifdef CONFIG_X86_64

@@ -373,6 +391,14 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };

+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+/* 32-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#else
+/* 64-bit pagetable entries */
+#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
+
 struct pv_mmu_ops pv_mmu_ops = {
 #ifndef CONFIG_X86_64
 	.pagetable_setup_start = native_pagetable_setup_start,

@@ -424,22 +450,23 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.pmd_clear = native_pmd_clear,
 #endif
 	.set_pud = native_set_pud,

-	.pmd_val = native_pmd_val,
-	.make_pmd = native_make_pmd,
+	.pmd_val = PTE_IDENT,
+	.make_pmd = PTE_IDENT,

 #if PAGETABLE_LEVELS == 4
-	.pud_val = native_pud_val,
-	.make_pud = native_make_pud,
+	.pud_val = PTE_IDENT,
+	.make_pud = PTE_IDENT,
+
 	.set_pgd = native_set_pgd,
 #endif
 #endif	/* PAGETABLE_LEVELS >= 3 */

-	.pte_val = native_pte_val,
-	.pte_flags = native_pte_flags,
-	.pgd_val = native_pgd_val,
+	.pte_val = PTE_IDENT,
+	.pgd_val = PTE_IDENT,

-	.make_pte = native_make_pte,
-	.make_pgd = native_make_pgd,
+	.make_pte = PTE_IDENT,
+	.make_pgd = PTE_IDENT,

 	.dup_mmap = paravirt_nop,
 	.exit_mmap = paravirt_nop,
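__PV_IS_CALLEE_SAVE() in the pv_irq_ops initializer above, and the PV_CALLEE_SAVE()/.func conversions in the vmi, vsmp, lguest and Xen hunks below, all follow from the irq ops changing from bare function pointers to a small wrapper struct. A rough stand-alone sketch of that pattern (the struct and macro bodies are simplified stand-ins for the real paravirt definitions):

#include <stdio.h>

typedef unsigned long (*pv_fn_t)(void);

/* Stand-in for the kernel's callee-save wrapper: the function pointer is
 * boxed in a struct so the type system distinguishes ops that are called
 * through register-saving thunks from plain C function pointers. */
struct pv_callee_save {
	pv_fn_t func;
};

#define PV_CALLEE_SAVE(f) ((struct pv_callee_save){ .func = (f) })

struct pv_irq_ops_sketch {
	struct pv_callee_save save_fl;
};

static unsigned long native_save_fl_sketch(void)
{
	return 0x200; /* pretend IF is set */
}

int main(void)
{
	struct pv_irq_ops_sketch ops = {
		.save_fl = PV_CALLEE_SAVE(native_save_fl_sketch),
	};

	/* Callers that want the raw C function now go through .func, which
	 * is why vmi_32.c below switches to para_fill(pv_irq_ops.save_fl.func, ...). */
	printf("flags: %#lx\n", ops.save_fl.func());
	return 0;
}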
arch/x86/kernel/paravirt_patch_32.c

@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");

+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	/* arg in %eax, return in %eax */
+	return 0;
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	/* arg in %edx:%eax, return in %edx:%eax */
+	return 0;
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
arch/x86/kernel/paravirt_patch_64.c

@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

+DEF_NATIVE(, mov32, "mov %edi, %eax");
+DEF_NATIVE(, mov64, "mov %rdi, %rax");
+
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov32, end__mov32);
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+	return paravirt_patch_insns(insnbuf, len,
+				    start__mov64, end__mov64);
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		      unsigned long addr, unsigned len)
 {
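The point of the ident helpers: a pv op that merely returns its argument does not need an indirect call at all, so on 64-bit the call site can be patched to the single mov32/mov64 instruction defined above. A rough user-space illustration of that dispatch (names and the byte-copying helper are simplified stand-ins, not the kernel patching machinery):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;

static u64 ident_64(u64 x)   { return x; }             /* like _paravirt_ident_64 */
static u64 masked_val(u64 x) { return x & ~0xfffULL; } /* hypothetical non-identity op */

/* Rough stand-in for paravirt_patch_insns(): copy a replacement sequence
 * into the call-site buffer when it fits. */
static unsigned patch_insns(void *buf, unsigned len,
			    const unsigned char *seq, unsigned seqlen)
{
	if (seqlen > len)
		return 0;
	memcpy(buf, seq, seqlen);
	return seqlen;
}

static unsigned patch_site(u64 (*op)(u64), void *buf, unsigned len)
{
	/* "mov %rdi, %rax" encodes as 48 89 f8 */
	static const unsigned char mov64[] = { 0x48, 0x89, 0xf8 };

	if (op == ident_64)
		return patch_insns(buf, len, mov64, sizeof(mov64));
	return 0; /* not an identity op: keep the indirect call */
}

int main(void)
{
	unsigned char site[16] = { 0 };

	printf("identity op:     patched %u bytes\n",
	       patch_site(ident_64, site, sizeof(site)));
	printf("non-identity op: patched %u bytes\n",
	       patch_site(masked_val, site, sizeof(site)));
	return 0;
}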
arch/x86/kernel/vmi_32.c

@@ -670,10 +670,11 @@ static inline int __init activate_vmi(void)
 	para_fill(pv_mmu_ops.write_cr2, SetCR2);
 	para_fill(pv_mmu_ops.write_cr3, SetCR3);
 	para_fill(pv_cpu_ops.write_cr4, SetCR4);
-	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
-	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
-	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
-	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
+
+	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
+	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
+	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
+	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
 	para_fill(pv_cpu_ops.wbinvd, WBINVD);
 	para_fill(pv_cpu_ops.read_tsc, RDTSC);
arch/x86/kernel/vsmp_64.c

@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
 		flags &= ~X86_EFLAGS_IF;
 	return flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);

 static void vsmp_restore_fl(unsigned long flags)
 {

@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
 		flags |= X86_EFLAGS_AC;
 	native_restore_fl(flags);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);

 static void vsmp_irq_disable(void)
 {

@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
 	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);

 static void vsmp_irq_enable(void)
 {

@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
 	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);

 static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 					    unsigned long addr, unsigned len)

@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
 	       cap, ctl);
 	if (cap & ctl & (1 << 4)) {
 		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
-		pv_irq_ops.irq_disable = vsmp_irq_disable;
-		pv_irq_ops.irq_enable  = vsmp_irq_enable;
-		pv_irq_ops.save_fl  = vsmp_save_fl;
-		pv_irq_ops.restore_fl  = vsmp_restore_fl;
+		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+		pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
+		pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
+		pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
 		pv_init_ops.patch = vsmp_patch;
 		ctl &= ~(1 << 4);
arch/x86/lguest/boot.c

@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);

 /* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
 	lguest_data.irq_enabled = flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(restore_fl);

 /* Interrupts go off... */
 static void irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);

 /* Interrupts go on... */
 static void irq_enable(void)
 {
 	lguest_data.irq_enabled = X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+
 /*:*/

 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt).  This seems to work for the moment,

@@ -984,10 +989,10 @@ __init void lguest_init(void)
 	/* interrupt-related operations */
 	pv_irq_ops.init_IRQ = lguest_init_IRQ;
-	pv_irq_ops.save_fl = save_fl;
-	pv_irq_ops.restore_fl = restore_fl;
-	pv_irq_ops.irq_disable = irq_disable;
-	pv_irq_ops.irq_enable = irq_enable;
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;

 	/* init-time operations */
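The lguest ops above never execute cli/sti; the guest's virtual interrupt state is a single word in the shared lguest_data page that the host consults. A toy model of that scheme (the structure below is a stand-in, not the real lguest_data layout):

#include <stdio.h>

#define X86_EFLAGS_IF 0x200UL

/* Toy stand-in for the shared lguest_data page; the real layout differs. */
static struct {
	unsigned long irq_enabled;
} lguest_data;

static unsigned long save_fl(void)          { return lguest_data.irq_enabled; }
static void restore_fl(unsigned long flags) { lguest_data.irq_enabled = flags; }
static void irq_disable(void)               { lguest_data.irq_enabled = 0; }
static void irq_enable(void)                { lguest_data.irq_enabled = X86_EFLAGS_IF; }

int main(void)
{
	unsigned long saved;

	irq_enable();
	saved = save_fl();       /* X86_EFLAGS_IF */

	irq_disable();           /* the guest's "cli" */
	/* ... critical section: the host sees irq_enabled == 0 ... */
	restore_fl(saved);

	printf("irq_enabled = %#lx\n", lguest_data.irq_enabled);
	return 0;
}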
arch/x86/xen/Makefile

@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
 endif

 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
-			time.o xen-asm_$(BITS).o grant-table.o suspend.o
+			time.o xen-asm.o xen-asm_$(BITS).o \
+			grant-table.o suspend.o

 obj-$(CONFIG_SMP)		+= smp.o spinlock.o

 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
arch/x86/xen/enlighten.c

(This diff is collapsed in the web view and is not shown here; +19 -748.)
arch/x86/xen/irq.c

@@ -50,6 +50,7 @@ static unsigned long xen_save_fl(void)
 	*/
 	return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

 static void xen_restore_fl(unsigned long flags)
 {

@@ -76,6 +77,7 @@ static void xen_restore_fl(unsigned long flags)
 			xen_force_evtchn_callback();
 	}
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

 static void xen_irq_disable(void)
 {

@@ -86,6 +88,7 @@ static void xen_irq_disable(void)
 	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
 	preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

 static void xen_irq_enable(void)
 {

@@ -106,6 +109,7 @@ static void xen_irq_enable(void)
 	if (unlikely(vcpu->evtchn_upcall_pending))
 		xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

 static void xen_safe_halt(void)
 {

@@ -124,10 +128,12 @@ static void xen_halt(void)
 static const struct pv_irq_ops xen_irq_ops __initdata = {
 	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
+
+	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
+	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
 	.safe_halt = xen_safe_halt,
 	.halt = xen_halt,
 #ifdef CONFIG_X86_64
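The return expression in xen_save_fl() relies on a small trick: assuming flags holds 0 or 1 (the logical inverse of the vcpu's evtchn_upcall_mask, which is how the lines elided from this hunk set it up), negating it gives 0 or ~0UL, and masking with X86_EFLAGS_IF leaves exactly the IF bit. A quick check of that identity:

#include <stdio.h>

#define X86_EFLAGS_IF 0x200UL

int main(void)
{
	unsigned long mask;

	for (mask = 0; mask <= 1; mask++) {
		unsigned long flags = !mask;   /* flag has the opposite sense of the mask */
		unsigned long ret   = (-flags) & X86_EFLAGS_IF;

		printf("evtchn_upcall_mask=%lu -> save_fl()=%#lx\n", mask, ret);
	}
	return 0; /* prints 0x200 for mask==0 and 0 for mask==1 */
}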
arch/x86/xen/mmu.c

(This diff is collapsed in the web view and is not shown here; +745 -0.)
arch/x86/xen/mmu.h

@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
 void  xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep, pte_t pte);

+unsigned long xen_read_cr2_direct(void);
+
+extern const struct pv_mmu_ops xen_mmu_ops;
 #endif	/* _XEN_MMU_H */
arch/x86/xen/smp.c

@@ -170,8 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 	/* We've switched to the "real" per-cpu gdt, so make sure the
 	   old memory can be recycled */
-	make_lowmem_page_readwrite(__per_cpu_load +
-				   (unsigned long)&per_cpu_var(gdt_page));
+	make_lowmem_page_readwrite(xen_initial_gdt);

 	xen_setup_vcpu_info_placement();
 }

@@ -287,6 +286,9 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
 	irq_ctx_init(cpu);
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
+	per_cpu(kernel_stack, cpu) =
+		(unsigned long)task_stack_page(idle) -
+		KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
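The per_cpu(kernel_stack, cpu) assignment added above precomputes the stack top that the 64-bit entry code expects for the new CPU's idle task. A stand-alone sketch of the arithmetic (the THREAD_SIZE and KERNEL_STACK_OFFSET values below are illustrative, not the real header constants):

#include <stdio.h>

/* Illustrative values; the real THREAD_SIZE and KERNEL_STACK_OFFSET come
 * from the x86 headers. */
#define THREAD_SIZE         (2 * 4096UL) /* pretend 8 KiB kernel stacks  */
#define KERNEL_STACK_OFFSET (5 * 8UL)    /* a few words kept free on top */

int main(void)
{
	static unsigned char idle_stack_page[2 * 4096]; /* stand-in for task_stack_page(idle) */
	unsigned long base = (unsigned long)idle_stack_page;

	/* Same arithmetic as the added per_cpu(kernel_stack, cpu) line:
	 * point at the top of the stack area, minus the reserved offset. */
	unsigned long kernel_stack = base - KERNEL_STACK_OFFSET + THREAD_SIZE;

	printf("stack base:   %#lx\n", base);
	printf("kernel_stack: %#lx\n", kernel_stack);
	return 0;
}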
arch/x86/xen/xen-asm.S  (new file, mode 100644)

/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is encoded for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (ie, vcpu in percpu data) of
	the operations here; the indirect forms are better handled in
	C, since they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"

/*
	Enable events.  This clears the event mask and tests the pending
	event status with one and operation.  If there are pending
	events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other
	bits in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because Xen
	and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
	In principle the caller should be passing us a value return from
	xen_save_fl_direct, but for robustness sake we test only the
	X86_EFLAGS_IF flag rather than the whole byte. After setting the
	interrupt mask state, it checks for unmasked pending events and
	enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret
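A C-level model of what xen_save_fl_direct above computes (illustrative only; the real code is the three asm instructions shown):

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_IF 0x200UL

/* The asm tests the per-vcpu event-mask byte, sets %ah to 1 when the mask
 * is zero, then doubles %ah; since %ah is bits 8..15 of %eax, the result
 * is X86_EFLAGS_IF (0x200) when events are enabled and 0 when masked. */
static unsigned long save_fl_direct_model(uint8_t vcpu_info_mask)
{
	uint8_t ah = (vcpu_info_mask == 0); /* setz %ah      */

	ah = (uint8_t)(ah + ah);            /* addb %ah, %ah */
	return (unsigned long)ah << 8;      /* %ah lives in bits 8..15 of %eax */
}

int main(void)
{
	printf("masked:   %#lx\n", save_fl_direct_model(1)); /* 0     */
	printf("unmasked: %#lx\n", save_fl_direct_model(0)); /* 0x200 == X86_EFLAGS_IF */
	return 0;
}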
arch/x86/xen/xen-asm.h  (new file, mode 100644)

#ifndef _XEN_XEN_ASM_H
#define _XEN_XEN_ASM_H

#include <linux/linkage.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#endif
arch/x86/xen/xen-asm_32.S

@@ -11,101 +11,28 @@
 	generally too large to inline anyway.
  */

-#include <linux/linkage.h>
-
-//#include <asm/asm-offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/percpu.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>

 #include <xen/interface/xen.h>

-#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)	.globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI	0x80000000
+#include "xen-asm.h"

 [... the xen_irq_enable_direct, xen_irq_disable_direct, xen_save_fl_direct
     and xen_restore_fl_direct bodies and their comment blocks are deleted
     here; the shared implementations now live in xen-asm.S ...]

+/*
+	Force an event check by making a hypercall,
+	but preserve regs before making the call.
+ */
+check_events:
+	push %eax
+	push %ecx
+	push %edx
+	call xen_force_evtchn_callback
+	pop %edx
+	pop %ecx
+	pop %eax
+	ret

 /*
 	We can't use sysexit directly, because we're not running in ring0.

@@ -289,17 +216,3 @@ ENTRY(xen_iret_crit_fixup)
 	lea 4(%edi),%esp		/* point esp to new frame */
 2:	jmp xen_do_upcall

-/*
-	Force an event check by making a hypercall,
-	but preserve regs before making the call.
- */
-check_events:
-	push %eax
-	push %ecx
-	push %edx
-	call xen_force_evtchn_callback
-	pop %edx
-	pop %ecx
-	pop %eax
-	ret
arch/x86/xen/xen-asm_64.S

@@ -11,142 +11,14 @@
 	generally too large to inline anyway.
  */

-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
-#include <asm/segment.h>
 #include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>

 #include <xen/interface/xen.h>

-#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)	.globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI	0x80000000
-
-#if 1
-/*
-	FIXME: x86_64 now can support direct access to percpu variables
-	via a segment override.  Update xen accordingly.
- */
-#define BUG			ud2a
-#endif
+#include "xen-asm.h"

 [... the BUG-stubbed xen_irq_enable_direct, xen_irq_disable_direct,
     xen_save_fl_direct and xen_restore_fl_direct bodies and the 64-bit
     check_events helper are deleted here; the shared implementations now
     live in xen-asm.S ...]

 ENTRY(xen_adjust_exception_frame)
 	mov 8+0(%rsp),%rcx
arch/x86/xen/xen-ops.h

@@ -10,9 +10,12 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];

+extern void *xen_initial_gdt;
+
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);

+DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);

@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
 void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
+void xen_setup_machphys_mapping(void);
+pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_ident_map_ISA(void);
+void xen_reserve_top(void);
+
+void xen_leave_lazy(void);
+void xen_post_allocator_init(void);

 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);