Commit efdfce2b authored by Linus Torvalds

Merge tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 paravirt removal from Tony Luck:
 "Nobody cares about paravirtualization on ia64 anymore"

* tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: remove paravirt code
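
For orientation, the sketch below (editorial, not part of the patch; the struct and the #define are copied in simplified form from the pv_irq_ops and hw_irq.h hunks further down) shows the indirection pattern this commit deletes. Under CONFIG_PARAVIRT_GUEST every privileged operation dispatched through a function-pointer table that a hypervisor port could replace; in the native build, which is all that remains, the same name is a plain alias for the ia64_native_* implementation:

	/* Paravirt build: one indirect call per operation. */
	struct pv_irq_ops {
		void (*register_ipi)(void);
		/* ... more irq operations ... */
	};
	extern struct pv_irq_ops pv_irq_ops;

	static inline void ia64_register_ipi(void)
	{
		pv_irq_ops.register_ipi();
	}

	/* Native build (the only one after this commit): no indirection. */
	#define ia64_register_ipi	ia64_native_register_ipi

As the Kconfig hunk below shows, PARAVIRT_GUEST already depended on BROKEN, so only the native aliases survive.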
@@ -137,29 +137,6 @@ config AUDIT_ARCH
 	bool
 	default y
 
-menuconfig PARAVIRT_GUEST
-	bool "Paravirtualized guest support"
-	depends on BROKEN
-	help
-	  Say Y here to get to see options related to running Linux under
-	  various hypervisors.  This option alone does not add any kernel code.
-
-	  If you say N, all options in this submenu will be skipped and disabled.
-
-if PARAVIRT_GUEST
-
-config PARAVIRT
-	bool "Enable paravirtualization code"
-	depends on PARAVIRT_GUEST
-	default y
-	help
-	  This changes the kernel so it can modify itself when it is run
-	  under a hypervisor, potentially improving performance significantly
-	  over full virtualization.  However, when run without a hypervisor
-	  the kernel is theoretically slower and slightly larger.
-
-endif
-
 choice
 	prompt "System type"
 	default IA64_GENERIC
...
@@ -15,11 +15,7 @@
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 
-#ifndef CONFIG_PARAVIRT
 typedef u8 ia64_vector;
-#else
-typedef u16 ia64_vector;
-#endif
 
 /*
  * 0 special
@@ -114,15 +110,11 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
 
 extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define ia64_register_ipi	ia64_native_register_ipi
 #define assign_irq_vector	ia64_native_assign_irq_vector
 #define free_irq_vector		ia64_native_free_irq_vector
 #define register_percpu_irq	ia64_native_register_percpu_irq
 #define ia64_resend_irq		ia64_native_resend_irq
-#endif
 
 extern void ia64_native_register_ipi(void);
 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
...
@@ -7,19 +7,6 @@
 #ifndef _ASM_IA64_INTRINSICS_H
 #define _ASM_IA64_INTRINSICS_H
 
-#include <asm/paravirt_privop.h>
 #include <uapi/asm/intrinsics.h>
 
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_PARAVIRT)
-# undef IA64_INTRINSIC_API
-# undef IA64_INTRINSIC_MACRO
-# ifdef ASM_SUPPORTED
-#  define IA64_INTRINSIC_API(name)	paravirt_ ## name
-# else
-#  define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
-# endif
-#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
-#endif
-#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_IA64_INTRINSICS_H */
@@ -55,14 +55,10 @@
 
 #define NR_IOSAPICS			256
 
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
 #define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
 #define __iosapic_read			__ia64_native_iosapic_read
 #define __iosapic_write			__ia64_native_iosapic_write
 #define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
-#endif
 
 extern void __init ia64_native_iosapic_pcat_compat_init(void);
 extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
...
@@ -18,12 +18,6 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
-#ifdef CONFIG_PARAVIRT
-	struct elf64_shdr *paravirt_bundles;
-					/* paravirt_alt_bundle_patch table */
-	struct elf64_shdr *paravirt_insts;
-					/* paravirt_alt_inst_patch table */
-#endif
 	unsigned long gp;		/* global-pointer for module */
 
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
...
@@ -22,32 +22,6 @@
 
 #define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
 
-#define __paravirt_switch_to			ia64_native_switch_to
-#define __paravirt_leave_syscall		ia64_native_leave_syscall
-#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
-#define __paravirt_leave_kernel			ia64_native_leave_kernel
-#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
-#define __paravirt_work_processed_syscall_target \
-						ia64_work_processed_syscall
-
-#define paravirt_fsyscall_table			ia64_native_fsyscall_table
-#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
-
-#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
-# define PARAVIRT_POISON	0xdeadbeefbaadf00d
-# define CLOBBER(clob)				\
-	;;					\
-	movl clob = PARAVIRT_POISON;		\
-	;;
-# define CLOBBER_PRED(pred_clob)		\
-	;;					\
-	cmp.eq pred_clob, p0 = r0, r0		\
-	;;
-#else
-# define CLOBBER(clob)			/* nothing */
-# define CLOBBER_PRED(pred_clob)	/* nothing */
-#endif
-
 #define MOV_FROM_IFA(reg)	\
 	mov reg = cr.ifa
 
@@ -70,106 +44,76 @@
 	mov reg = cr.iip
 
 #define MOV_FROM_IVR(reg, clob)	\
-	mov reg = cr.ivr	\
-	CLOBBER(clob)
+	mov reg = cr.ivr
 
 #define MOV_FROM_PSR(pred, reg, clob)	\
-(pred)	mov reg = psr		\
-	CLOBBER(clob)
+(pred)	mov reg = psr
 
 #define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
-(pred)	mov reg = ar.itc	\
-	CLOBBER(clob)		\
-	CLOBBER_PRED(pred_clob)
+(pred)	mov reg = ar.itc
 
 #define MOV_TO_IFA(reg, clob)	\
-	mov cr.ifa = reg	\
-	CLOBBER(clob)
+	mov cr.ifa = reg
 
 #define MOV_TO_ITIR(pred, reg, clob)	\
-(pred)	mov cr.itir = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.itir = reg
 
 #define MOV_TO_IHA(pred, reg, clob)	\
-(pred)	mov cr.iha = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.iha = reg
 
 #define MOV_TO_IPSR(pred, reg, clob)	\
-(pred)	mov cr.ipsr = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ipsr = reg
 
 #define MOV_TO_IFS(pred, reg, clob)	\
-(pred)	mov cr.ifs = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ifs = reg
 
 #define MOV_TO_IIP(reg, clob)	\
-	mov cr.iip = reg	\
-	CLOBBER(clob)
+	mov cr.iip = reg
 
 #define MOV_TO_KR(kr, reg, clob0, clob1)	\
-	mov IA64_KR(kr) = reg	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	mov IA64_KR(kr) = reg
 
 #define ITC_I(pred, reg, clob)	\
-(pred)	itc.i reg		\
-	CLOBBER(clob)
+(pred)	itc.i reg
 
 #define ITC_D(pred, reg, clob)	\
-(pred)	itc.d reg		\
-	CLOBBER(clob)
+(pred)	itc.d reg
 
 #define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
 (pred_i) itc.i reg;				\
-(pred_d) itc.d reg				\
-	CLOBBER(clob)
+(pred_d) itc.d reg
 
 #define THASH(pred, reg0, reg1, clob)	\
-(pred)	thash reg0 = reg1	\
-	CLOBBER(clob)
+(pred)	thash reg0 = reg1
 
 #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
 	ssm psr.ic | PSR_DEFAULT_BITS	\
-	CLOBBER(clob0)			\
-	CLOBBER(clob1)			\
 	;;				\
 	srlz.i /* guarantee that interruption collection is on */ \
 	;;
 
 #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
 	ssm psr.ic	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)	\
 	;;		\
 	srlz.d
 
 #define RSM_PSR_IC(clob)	\
-	rsm psr.ic		\
-	CLOBBER(clob)
+	rsm psr.ic
 
 #define SSM_PSR_I(pred, pred_clob, clob)	\
-(pred)	ssm psr.i	\
-	CLOBBER(clob)	\
-	CLOBBER_PRED(pred_clob)
+(pred)	ssm psr.i
 
 #define RSM_PSR_I(pred, clob0, clob1)	\
-(pred)	rsm psr.i	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)
+(pred)	rsm psr.i
 
 #define RSM_PSR_I_IC(clob0, clob1, clob2)	\
-	rsm psr.i | psr.ic	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
-	CLOBBER(clob2)
+	rsm psr.i | psr.ic
 
 #define RSM_PSR_DT	\
 	rsm psr.dt
 
 #define RSM_PSR_BE_I(clob0, clob1)	\
-	rsm psr.be | psr.i	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	rsm psr.be | psr.i
 
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	ssm psr.dt	\
@@ -177,15 +121,10 @@
 	srlz.i
 
 #define BSW_0(clob0, clob1, clob2)	\
-	bsw.0			\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
-	CLOBBER(clob2)
+	bsw.0
 
 #define BSW_1(clob0, clob1)	\
-	bsw.1		\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)
+	bsw.1
 
 #define COVER	\
 	cover
...
#ifndef _ASM_NATIVE_PVCHK_INST_H
#define _ASM_NATIVE_PVCHK_INST_H
/******************************************************************************
* arch/ia64/include/asm/native/pvchk_inst.h
* Checker for paravirtualizations of privileged operations.
*
* Copyright (C) 2005 Hewlett-Packard Co
* Dan Magenheimer <dan.magenheimer@hp.com>
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/**********************************************
* Instructions paravirtualized for correctness
**********************************************/
/* "fc" and "thash" are privilege-sensitive instructions, meaning they
* may have different semantics depending on whether they are executed
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
* be allowed to execute directly, lest incorrect semantics result.
*/
#define fc .error "fc should not be used directly."
#define thash .error "thash should not be used directly."
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
* is not currently used (though it may be in a long-format VHPT system!)
* and the semantics of cover only change if psr.ic is off which is very
 * rare (and currently non-existent outside of assembly code).
*/
#define ttag .error "ttag should not be used directly."
#define cover .error "cover should not be used directly."
/* There are also privilege-sensitive registers. These registers are
* readable at any privilege level but only writable at PL0.
*/
#define cpuid .error "cpuid should not be used directly."
#define pmd .error "pmd should not be used directly."
/*
* mov ar.eflag =
* mov = ar.eflag
*/
/**********************************************
* Instructions paravirtualized for performance
**********************************************/
/*
 * These instructions include '.', which cpp can't handle
 * (or at least can't handle easily).
* They are handled by sed instead of cpp.
*/
/* for .S
* itc.i
* itc.d
*
* bsw.0
* bsw.1
*
* ssm psr.ic | PSR_DEFAULT_BITS
* ssm psr.ic
* rsm psr.ic
* ssm psr.i
* rsm psr.i
* rsm psr.i | psr.ic
* rsm psr.dt
* ssm psr.dt
*
* mov = cr.ifa
* mov = cr.itir
* mov = cr.isr
* mov = cr.iha
* mov = cr.ipsr
* mov = cr.iim
* mov = cr.iip
* mov = cr.ivr
* mov = psr
*
* mov cr.ifa =
* mov cr.itir =
* mov cr.iha =
* mov cr.ipsr =
* mov cr.ifs =
* mov cr.iip =
* mov cr.kr =
*/
/* for intrinsics
* ssm psr.i
* rsm psr.i
* mov = psr
* mov = ivr
* mov = tpr
* mov cr.itm =
* mov eoi =
* mov rr[] =
* mov = rr[]
* mov = kr
* mov kr =
* ptc.ga
*/
/*************************************************************
 * define paravirtualized instruction macros as nops to ignore them,
 * and check whether their arguments are appropriate.
*************************************************************/
/* check whether reg is a regular register */
.macro is_rreg_in reg
.ifc "\reg", "r0"
nop 0
.exitm
.endif
;;
mov \reg = r0
;;
.endm
#define IS_RREG_IN(reg) is_rreg_in reg ;
#define IS_RREG_OUT(reg) \
;; \
mov reg = r0 \
;;
#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
/* check whether pred is a predicate register */
#define IS_PRED_IN(pred) \
;; \
(pred) nop 0 \
;;
#define IS_PRED_OUT(pred) \
;; \
cmp.eq pred, p0 = r0, r0 \
;;
#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
nop 0
#define MOV_FROM_IFA(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_ITIR(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_ISR(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IHA(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IPSR(pred, reg) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg)
#define MOV_FROM_IIM(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IIP(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IVR(reg, clob) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_FROM_PSR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
IS_PRED_IN(pred) \
IS_PRED_CLOB(pred_clob) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IFA(reg, clob) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_ITIR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IHA(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IPSR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IFS(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IIP(reg, clob) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_KR(kr, reg, clob0, clob1) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define ITC_I(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define ITC_D(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
IS_PRED_IN(pred_i) \
IS_PRED_IN(pred_d) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define THASH(pred, reg0, reg1, clob) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg0) \
IS_RREG_IN(reg1) \
IS_RREG_CLOB(clob)
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define RSM_PSR_IC(clob) \
IS_RREG_CLOB(clob)
#define SSM_PSR_I(pred, pred_clob, clob) \
IS_PRED_IN(pred) \
IS_PRED_CLOB(pred_clob) \
IS_RREG_CLOB(clob)
#define RSM_PSR_I(pred, clob0, clob1) \
IS_PRED_IN(pred) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1) \
IS_RREG_CLOB(clob2)
#define RSM_PSR_DT \
nop 0
#define RSM_PSR_BE_I(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define SSM_PSR_DT_AND_SRLZ_I \
nop 0
#define BSW_0(clob0, clob1, clob2) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1) \
IS_RREG_CLOB(clob2)
#define BSW_1(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define COVER \
nop 0
#define RFI \
br.ret.sptk.many rp /* defining nop causes dependency error */
#endif /* _ASM_NATIVE_PVCHK_INST_H */
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
#ifndef __ASSEMBLY__
/******************************************************************************
* fsys related addresses
*/
struct pv_fsys_data {
unsigned long *fsyscall_table;
void *fsys_bubble_down;
};
extern struct pv_fsys_data pv_fsys_data;
unsigned long *paravirt_get_fsyscall_table(void);
char *paravirt_get_fsys_bubble_down(void);
/******************************************************************************
* patchlist addresses for gate page
*/
enum pv_gate_patchlist {
PV_GATE_START_FSYSCALL,
PV_GATE_END_FSYSCALL,
PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
PV_GATE_START_VTOP,
PV_GATE_END_VTOP,
PV_GATE_START_MCKINLEY_E9,
PV_GATE_END_MCKINLEY_E9,
};
struct pv_patchdata {
unsigned long start_fsyscall_patchlist;
unsigned long end_fsyscall_patchlist;
unsigned long start_brl_fsys_bubble_down_patchlist;
unsigned long end_brl_fsys_bubble_down_patchlist;
unsigned long start_vtop_patchlist;
unsigned long end_vtop_patchlist;
unsigned long start_mckinley_e9_patchlist;
unsigned long end_mckinley_e9_patchlist;
void *gate_section;
};
extern struct pv_patchdata pv_patchdata;
unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
void *paravirt_get_gate_section(void);
#endif
#ifdef CONFIG_PARAVIRT_GUEST
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
#ifndef __ASSEMBLY__
#include <asm/hw_irq.h>
#include <asm/meminit.h>
/******************************************************************************
* general info
*/
struct pv_info {
unsigned int kernel_rpl;
int paravirt_enabled;
const char *name;
};
extern struct pv_info pv_info;
static inline int paravirt_enabled(void)
{
return pv_info.paravirt_enabled;
}
static inline unsigned int get_kernel_rpl(void)
{
return pv_info.kernel_rpl;
}
/******************************************************************************
* initialization hooks.
*/
struct rsvd_region;
struct pv_init_ops {
void (*banner)(void);
int (*reserve_memory)(struct rsvd_region *region);
void (*arch_setup_early)(void);
void (*arch_setup_console)(char **cmdline_p);
int (*arch_setup_nomca)(void);
void (*post_smp_prepare_boot_cpu)(void);
#ifdef ASM_SUPPORTED
unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
unsigned long type);
unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
unsigned long type);
#endif
void (*patch_branch)(unsigned long tag, unsigned long type);
};
extern struct pv_init_ops pv_init_ops;
static inline void paravirt_banner(void)
{
if (pv_init_ops.banner)
pv_init_ops.banner();
}
static inline int paravirt_reserve_memory(struct rsvd_region *region)
{
if (pv_init_ops.reserve_memory)
return pv_init_ops.reserve_memory(region);
return 0;
}
static inline void paravirt_arch_setup_early(void)
{
if (pv_init_ops.arch_setup_early)
pv_init_ops.arch_setup_early();
}
static inline void paravirt_arch_setup_console(char **cmdline_p)
{
if (pv_init_ops.arch_setup_console)
pv_init_ops.arch_setup_console(cmdline_p);
}
static inline int paravirt_arch_setup_nomca(void)
{
if (pv_init_ops.arch_setup_nomca)
return pv_init_ops.arch_setup_nomca();
return 0;
}
static inline void paravirt_post_smp_prepare_boot_cpu(void)
{
if (pv_init_ops.post_smp_prepare_boot_cpu)
pv_init_ops.post_smp_prepare_boot_cpu();
}
/******************************************************************************
* replacement of iosapic operations.
*/
struct pv_iosapic_ops {
void (*pcat_compat_init)(void);
struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
};
extern struct pv_iosapic_ops pv_iosapic_ops;
static inline void
iosapic_pcat_compat_init(void)
{
if (pv_iosapic_ops.pcat_compat_init)
pv_iosapic_ops.pcat_compat_init();
}
static inline struct irq_chip*
iosapic_get_irq_chip(unsigned long trigger)
{
return pv_iosapic_ops.__get_irq_chip(trigger);
}
static inline unsigned int
__iosapic_read(char __iomem *iosapic, unsigned int reg)
{
return pv_iosapic_ops.__read(iosapic, reg);
}
static inline void
__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
return pv_iosapic_ops.__write(iosapic, reg, val);
}
/******************************************************************************
* replacement of irq operations.
*/
struct pv_irq_ops {
void (*register_ipi)(void);
int (*assign_irq_vector)(int irq);
void (*free_irq_vector)(int vector);
void (*register_percpu_irq)(ia64_vector vec,
struct irqaction *action);
void (*resend_irq)(unsigned int vector);
};
extern struct pv_irq_ops pv_irq_ops;
static inline void
ia64_register_ipi(void)
{
pv_irq_ops.register_ipi();
}
static inline int
assign_irq_vector(int irq)
{
return pv_irq_ops.assign_irq_vector(irq);
}
static inline void
free_irq_vector(int vector)
{
return pv_irq_ops.free_irq_vector(vector);
}
static inline void
register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
pv_irq_ops.register_percpu_irq(vec, action);
}
static inline void
ia64_resend_irq(unsigned int vector)
{
pv_irq_ops.resend_irq(vector);
}
/******************************************************************************
* replacement of time operations.
*/
extern struct itc_jitter_data_t itc_jitter_data;
extern volatile int time_keeper_id;
struct pv_time_ops {
void (*init_missing_ticks_accounting)(int cpu);
int (*do_steal_accounting)(unsigned long *new_itm);
void (*clocksource_resume)(void);
unsigned long long (*sched_clock)(void);
};
extern struct pv_time_ops pv_time_ops;
static inline void
paravirt_init_missing_ticks_accounting(int cpu)
{
if (pv_time_ops.init_missing_ticks_accounting)
pv_time_ops.init_missing_ticks_accounting(cpu);
}
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
static inline int
paravirt_do_steal_accounting(unsigned long *new_itm)
{
return pv_time_ops.do_steal_accounting(new_itm);
}
static inline unsigned long long paravirt_sched_clock(void)
{
return pv_time_ops.sched_clock();
}
#endif /* !__ASSEMBLY__ */
#else
/* fallback for native case */
#ifndef __ASSEMBLY__
#define paravirt_banner() do { } while (0)
#define paravirt_reserve_memory(region) 0
#define paravirt_arch_setup_early() do { } while (0)
#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
#define paravirt_arch_setup_nomca() 0
#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
#define paravirt_do_steal_accounting(new_itm) 0
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* __ASM_PARAVIRT_H */
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __ASM_PARAVIRT_PATCH_H
#define __ASM_PARAVIRT_PATCH_H
#ifdef __ASSEMBLY__
.section .paravirt_branches, "a"
.previous
#define PARAVIRT_PATCH_SITE_BR(type) \
{ \
[1:] ; \
br.cond.sptk.many 2f ; \
nop.b 0 ; \
nop.b 0;; ; \
} ; \
2: \
.xdata8 ".paravirt_branches", 1b, type
#else
#include <linux/stringify.h>
#include <asm/intrinsics.h>
/* for binary patch */
struct paravirt_patch_site_bundle {
void *sbundle;
void *ebundle;
unsigned long type;
};
/* label means the beginning of new bundle */
#define paravirt_alt_bundle(instr, privop) \
"\t998:\n" \
"\t" instr "\n" \
"\t999:\n" \
"\t.pushsection .paravirt_bundles, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
__stringify(privop) "\n"
struct paravirt_patch_bundle_elem {
const void *sbundle;
const void *ebundle;
unsigned long type;
};
struct paravirt_patch_site_inst {
unsigned long stag;
unsigned long etag;
unsigned long type;
};
#define paravirt_alt_inst(instr, privop) \
"\t[998:]\n" \
"\t" instr "\n" \
"\t[999:]\n" \
"\t.pushsection .paravirt_insts, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
__stringify(privop) "\n"
struct paravirt_patch_site_branch {
unsigned long tag;
unsigned long type;
};
struct paravirt_patch_branch_target {
const void *entry;
unsigned long type;
};
void
__paravirt_patch_apply_branch(
unsigned long tag, unsigned long type,
const struct paravirt_patch_branch_target *entries,
unsigned int nr_entries);
void
paravirt_patch_reloc_br(unsigned long tag, const void *target);
void
paravirt_patch_reloc_brl(unsigned long tag, const void *target);
#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
unsigned long
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
unsigned long
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
const struct paravirt_patch_bundle_elem *elems,
unsigned long nelems,
const struct paravirt_patch_bundle_elem **found);
void
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
const struct paravirt_patch_site_bundle *end);
void
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
const struct paravirt_patch_site_inst *end);
void paravirt_patch_apply(void);
#else
#define paravirt_patch_apply_bundle(start, end) do { } while (0)
#define paravirt_patch_apply_inst(start, end) do { } while (0)
#define paravirt_patch_apply() do { } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PARAVIRT_PATCH_H */
/*
* Local variables:
* mode: C
* c-set-style: "linux"
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*/
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H
#ifdef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */
/******************************************************************************
* replacement of intrinsics operations.
*/
struct pv_cpu_ops {
void (*fc)(void *addr);
unsigned long (*thash)(unsigned long addr);
unsigned long (*get_cpuid)(int index);
unsigned long (*get_pmd)(int index);
unsigned long (*getreg)(int reg);
void (*setreg)(int reg, unsigned long val);
void (*ptcga)(unsigned long addr, unsigned long size);
unsigned long (*get_rr)(unsigned long index);
void (*set_rr)(unsigned long index, unsigned long val);
void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
unsigned long val2, unsigned long val3,
unsigned long val4);
void (*ssm_i)(void);
void (*rsm_i)(void);
unsigned long (*get_psr_i)(void);
void (*intrin_local_irq_restore)(unsigned long flags);
};
extern struct pv_cpu_ops pv_cpu_ops;
extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);
/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
#ifndef ASM_SUPPORTED
#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
#define __paravirt_getreg() pv_cpu_ops.getreg()
#endif
/* The mask for ia64_native_ssm/rsm() must be a constant ("i" constraint);
 * a static inline function doesn't satisfy that. */
#define paravirt_ssm(mask) \
do { \
if ((mask) == IA64_PSR_I) \
paravirt_ssm_i(); \
else \
ia64_native_ssm(mask); \
} while (0)
#define paravirt_rsm(mask) \
do { \
if ((mask) == IA64_PSR_I) \
paravirt_rsm_i(); \
else \
ia64_native_rsm(mask); \
} while (0)
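/*
 * Editorial illustration (hypothetical code, not part of the original
 * header): an inline function cannot forward its argument into an "i"
 * (immediate) constraint, because a function parameter is not a
 * compile-time constant as far as the constraint checker is concerned:
 *
 *	static inline void bad_ssm(unsigned long mask)
 *	{
 *		asm volatile ("ssm %0;;" :: "i"(mask) : "memory"); // fails to build
 *	}
 *
 * With the macro form above, (mask) is still a literal such as
 * IA64_PSR_I at the point where ia64_native_ssm() expands, so the
 * "i" constraint is satisfied.
 */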
/* returned ip value should be the one in the caller,
* not in __paravirt_getreg() */
#define paravirt_getreg(reg) \
({ \
unsigned long res; \
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \
res = __paravirt_getreg(reg); \
res; \
})
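/*
 * Editorial usage sketch (hypothetical, not from the original header):
 * the caller wants its *own* ip, so _IA64_REG_IP short-circuits to the
 * native intrinsic; read inside the pv_cpu_ops.getreg stub, ip would
 * point into the stub instead of the caller.
 *
 *	unsigned long sp = paravirt_getreg(_IA64_REG_SP);  // via pv_cpu_ops
 *	unsigned long ip = paravirt_getreg(_IA64_REG_IP);  // native read
 */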
/******************************************************************************
* replacement of hand written assembly codes.
*/
struct pv_cpu_asm_switch {
unsigned long switch_to;
unsigned long leave_syscall;
unsigned long work_processed_syscall;
unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
#endif /* __ASSEMBLY__ */
#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
#else
/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
#endif /* CONFIG_PARAVIRT */
#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data() /* nothing */
#endif
/* these routines utilize privilege-sensitive or performance-sensitive
* privileged instructions so the code must be replaced with
* paravirtualized versions */
#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall \
IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
#if defined(CONFIG_PARAVIRT)
/******************************************************************************
* binary patching infrastructure
*/
#define PARAVIRT_PATCH_TYPE_FC 1
#define PARAVIRT_PATCH_TYPE_THASH 2
#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
#define PARAVIRT_PATCH_TYPE_GET_PMD 4
#define PARAVIRT_PATCH_TYPE_PTCGA 5
#define PARAVIRT_PATCH_TYPE_GET_RR 6
#define PARAVIRT_PATCH_TYPE_SET_RR 7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
#define PARAVIRT_PATCH_TYPE_SSM_I 9
#define PARAVIRT_PATCH_TYPE_RSM_I 10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
/*
* struct task_struct* (*ia64_switch_to)(void* next_task);
* void *ia64_leave_syscall;
* void *ia64_work_processed_syscall
* void *ia64_leave_kernel;
*/
#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
(PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
(PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
(PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
(PARAVIRT_PATCH_TYPE_BR_START + 3)
#ifdef ASM_SUPPORTED
#include <asm/paravirt_patch.h>
/*
* pv_cpu_ops calling stub.
 * a normal function call convention can't be expressed in gcc
 * inline assembly.
*
* from the caller's point of view,
* the following registers will be clobbered.
* r2, r3
* r8-r15
* r16, r17
* b6, b7
* p6-p15
* ar.ccv
*
 * from the callee's point of view,
* the following registers can be used.
* r2, r3: scratch
* r8: scratch, input argument0 and return value
* r0-r15: scratch, input argument1-5
* b6: return pointer
* b7: scratch
* p6-p15: scratch
* ar.ccv: scratch
*
 * other registers must not be changed.  In particular:
 * b0: rp: preserved (gcc ignores b0 in the clobber list)
 * r16: saved gp
*/
/* 5 bundles */
#define __PARAVIRT_BR \
";;\n" \
"{ .mlx\n" \
"nop 0\n" \
"movl r2 = %[op_addr]\n"/* get function pointer address */ \
";;\n" \
"}\n" \
"1:\n" \
"{ .mii\n" \
"ld8 r2 = [r2]\n" /* load function descriptor address */ \
"mov r17 = ip\n" /* get ip to calc return address */ \
"mov r16 = gp\n" /* save gp */ \
";;\n" \
"}\n" \
"{ .mii\n" \
"ld8 r3 = [r2], 8\n" /* load entry address */ \
"adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
";;\n" \
"mov b7 = r3\n" /* set entry address */ \
"}\n" \
"{ .mib\n" \
"ld8 gp = [r2]\n" /* load gp value */ \
"mov b6 = r17\n" /* set return address */ \
"br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
"}\n" \
"1:\n" \
"{ .mii\n" \
"mov gp = r16\n" /* restore gp value */ \
"nop 0\n" \
"nop 0\n" \
";;\n" \
"}\n"
#define PARAVIRT_OP(op) \
[op_addr] "i"(&pv_cpu_ops.op)
#define PARAVIRT_TYPE(type) \
PARAVIRT_PATCH_TYPE_ ## type
#define PARAVIRT_REG_CLOBBERS0 \
"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
"r15", "r16", "r17"
#define PARAVIRT_REG_CLOBBERS1 \
"r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
"r15", "r16", "r17"
#define PARAVIRT_REG_CLOBBERS2 \
"r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
"r15", "r16", "r17"
#define PARAVIRT_REG_CLOBBERS5 \
"r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
"r15", "r16", "r17"
#define PARAVIRT_BR_CLOBBERS \
"b6", "b7"
#define PARAVIRT_PR_CLOBBERS \
"p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
#define PARAVIRT_AR_CLOBBERS \
"ar.ccv"
#define PARAVIRT_CLOBBERS0 \
PARAVIRT_REG_CLOBBERS0, \
PARAVIRT_BR_CLOBBERS, \
PARAVIRT_PR_CLOBBERS, \
PARAVIRT_AR_CLOBBERS, \
"memory"
#define PARAVIRT_CLOBBERS1 \
PARAVIRT_REG_CLOBBERS1, \
PARAVIRT_BR_CLOBBERS, \
PARAVIRT_PR_CLOBBERS, \
PARAVIRT_AR_CLOBBERS, \
"memory"
#define PARAVIRT_CLOBBERS2 \
PARAVIRT_REG_CLOBBERS2, \
PARAVIRT_BR_CLOBBERS, \
PARAVIRT_PR_CLOBBERS, \
PARAVIRT_AR_CLOBBERS, \
"memory"
#define PARAVIRT_CLOBBERS5 \
PARAVIRT_REG_CLOBBERS5, \
PARAVIRT_BR_CLOBBERS, \
PARAVIRT_PR_CLOBBERS, \
PARAVIRT_AR_CLOBBERS, \
"memory"
#define PARAVIRT_BR0(op, type) \
register unsigned long ia64_clobber asm ("r8"); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_clobber) \
: PARAVIRT_OP(op) \
: PARAVIRT_CLOBBERS0)
#define PARAVIRT_BR0_RET(op, type) \
register unsigned long ia64_intri_res asm ("r8"); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_intri_res) \
: PARAVIRT_OP(op) \
: PARAVIRT_CLOBBERS0)
#define PARAVIRT_BR1(op, type, arg1) \
register unsigned long __##arg1 asm ("r8") = arg1; \
register unsigned long ia64_clobber asm ("r8"); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_clobber) \
: PARAVIRT_OP(op), "0"(__##arg1) \
: PARAVIRT_CLOBBERS1)
#define PARAVIRT_BR1_RET(op, type, arg1) \
register unsigned long ia64_intri_res asm ("r8"); \
register unsigned long __##arg1 asm ("r8") = arg1; \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_intri_res) \
: PARAVIRT_OP(op), "0"(__##arg1) \
: PARAVIRT_CLOBBERS1)
#define PARAVIRT_BR1_VOID(op, type, arg1) \
register void *__##arg1 asm ("r8") = arg1; \
register unsigned long ia64_clobber asm ("r8"); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_clobber) \
: PARAVIRT_OP(op), "0"(__##arg1) \
: PARAVIRT_CLOBBERS1)
#define PARAVIRT_BR2(op, type, arg1, arg2) \
register unsigned long __##arg1 asm ("r8") = arg1; \
register unsigned long __##arg2 asm ("r9") = arg2; \
register unsigned long ia64_clobber1 asm ("r8"); \
register unsigned long ia64_clobber2 asm ("r9"); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(type)) \
: "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
: PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
: PARAVIRT_CLOBBERS2)
#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
static inline void \
paravirt_ ## op (void) \
{ \
PARAVIRT_BR0(op, type); \
}
#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
static inline unsigned long \
paravirt_ ## op (void) \
{ \
PARAVIRT_BR0_RET(op, type); \
return ia64_intri_res; \
}
#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
static inline void \
paravirt_ ## op (void *arg1) \
{ \
PARAVIRT_BR1_VOID(op, type, arg1); \
}
#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
static inline void \
paravirt_ ## op (unsigned long arg1) \
{ \
PARAVIRT_BR1(op, type, arg1); \
}
#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
static inline unsigned long \
paravirt_ ## op (unsigned long arg1) \
{ \
PARAVIRT_BR1_RET(op, type, arg1); \
return ia64_intri_res; \
}
#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
static inline void \
paravirt_ ## op (unsigned long arg1, \
unsigned long arg2) \
{ \
PARAVIRT_BR2(op, type, arg1, arg2); \
}
PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
static inline void
paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
unsigned long val2, unsigned long val3,
unsigned long val4)
{
register unsigned long __val0 asm ("r8") = val0;
register unsigned long __val1 asm ("r9") = val1;
register unsigned long __val2 asm ("r10") = val2;
register unsigned long __val3 asm ("r11") = val3;
register unsigned long __val4 asm ("r14") = val4;
register unsigned long ia64_clobber0 asm ("r8");
register unsigned long ia64_clobber1 asm ("r9");
register unsigned long ia64_clobber2 asm ("r10");
register unsigned long ia64_clobber3 asm ("r11");
register unsigned long ia64_clobber4 asm ("r14");
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
PARAVIRT_TYPE(SET_RR0_TO_RR4))
: "=r"(ia64_clobber0),
"=r"(ia64_clobber1),
"=r"(ia64_clobber2),
"=r"(ia64_clobber3),
"=r"(ia64_clobber4)
: PARAVIRT_OP(set_rr0_to_rr4),
"0"(__val0), "1"(__val1), "2"(__val2),
"3"(__val3), "4"(__val4)
: PARAVIRT_CLOBBERS5);
}
/* unsigned long paravirt_getreg(int reg) */
#define __paravirt_getreg(reg) \
({ \
register unsigned long ia64_intri_res asm ("r8"); \
register unsigned long __reg asm ("r8") = (reg); \
\
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(GETREG) \
+ (reg)) \
: "=r"(ia64_intri_res) \
: PARAVIRT_OP(getreg), "0"(__reg) \
: PARAVIRT_CLOBBERS1); \
\
ia64_intri_res; \
})
/* void paravirt_setreg(int reg, unsigned long val) */
#define paravirt_setreg(reg, val) \
do { \
register unsigned long __val asm ("r8") = val; \
register unsigned long __reg asm ("r9") = reg; \
register unsigned long ia64_clobber1 asm ("r8"); \
register unsigned long ia64_clobber2 asm ("r9"); \
\
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(SETREG) \
+ (reg)) \
: "=r"(ia64_clobber1), \
"=r"(ia64_clobber2) \
: PARAVIRT_OP(setreg), \
"1"(__reg), "0"(__val) \
: PARAVIRT_CLOBBERS2); \
} while (0)
#endif /* ASM_SUPPORTED */
#endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */
#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
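
Editorial note on the naming: in the native configuration (the only one left after this commit) the IA64_PARAVIRT_ASM_FUNC() aliases above expand as

	ia64_switch_to		-> ia64_native_switch_to
	ia64_leave_syscall	-> ia64_native_leave_syscall
	ia64_leave_kernel	-> ia64_native_leave_kernel

which is why the entry.S hunks further below can drop the __paravirt_* spellings and define ia64_switch_to, ia64_leave_syscall and ia64_leave_kernel directly.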
@@ -9,7 +9,7 @@ endif
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
-	 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o	\
+	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o	\
 	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
 
@@ -35,9 +35,6 @@ mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
-				   paravirt_patch.o
-
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
@@ -52,8 +49,6 @@ CFLAGS_traps.o  += -mfixed-range=f2-f5,f16-f31
 # The gate DSO image is built using a special linker script.
 include $(src)/Makefile.gate
 
-# tell compiled for native
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
-
 # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
 define sed-y
@@ -84,30 +79,3 @@ arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
 include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
 	$(Q)mkdir -p $(dir $@)
 	$(call cmd,nr_irqs)
-
-#
-# native ivt.S, entry.S and fsys.S
-#
-ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
-define paravirtualized_native
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
-AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
-extra-y += pvchk-$(1)
-endef
-$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
-
-#
-# Checker for paravirtualizations of privileged operations.
-#
-quiet_cmd_pv_check_sed = PVCHK	$@
-define cmd_pv_check_sed
-	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
-endef
-
-$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
-	$(call if_changed_dep,as_s_S)
-$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
-	$(call if_changed,pv_check_sed)
-$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
-	$(call if_changed,as_o_S)
-.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
@@ -464,7 +464,6 @@ efi_map_pal_code (void)
 		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		 IA64_GRANULE_SHIFT);
-	paravirt_dv_serialize_data();
 	ia64_set_psr(psr);		/* restore psr */
 }
...
@@ -51,7 +51,6 @@
 
 #include "minstate.h"
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * execve() is special because in case of success, we need to
  * setup a null register window frame.
@@ -161,7 +160,6 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -169,7 +167,7 @@ END(sys_clone)
  *	called.  The code starting at .map relies on this.  The rest of the code
  *	doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(__paravirt_switch_to)
+GLOBAL_ENTRY(ia64_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -221,9 +219,8 @@ GLOBAL_ENTRY(__paravirt_switch_to)
 	itr.d dtr[r25]=r23		// wire in new mapping...
 	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(__paravirt_switch_to)
+END(ia64_switch_to)
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -639,16 +636,8 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-#ifdef CONFIG_PARAVIRT
-	;;
-	br.cond.sptk.few ia64_leave_syscall
-	;;
-#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
-#ifndef CONFIG_PARAVIRT
 	// fall through
-#endif
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
@@ -694,7 +683,7 @@ END(ia64_ret_from_syscall)
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-GLOBAL_ENTRY(__paravirt_leave_syscall)
+GLOBAL_ENTRY(ia64_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -722,8 +711,8 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
 	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
-.global __paravirt_work_processed_syscall;
-__paravirt_work_processed_syscall:
+.global ia64_work_processed_syscall;
+ia64_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	adds r2=PT(LOADRS)+16,r12
 	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
@@ -836,9 +825,9 @@ __paravirt_work_processed_syscall:
 	mov.m ar.ssd=r0			// M2   clear ar.ssd
 	mov f11=f0			// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(__paravirt_leave_syscall)
+END(ia64_leave_syscall)
 
-GLOBAL_ENTRY(__paravirt_leave_kernel)
+GLOBAL_ENTRY(ia64_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -1171,26 +1160,25 @@ skip_rbs_switch:
 (p6)	br.cond.sptk.few .notify
 	br.call.spnt.many rp=preempt_schedule_irq
 .ret9:	cmp.eq p6,p0=r0,r0		// p6 <- 1 (re-check)
-(pLvSys)br.cond.sptk.few  __paravirt_pending_syscall_end
+(pLvSys)br.cond.sptk.few  ia64_work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0		// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few  __paravirt_pending_syscall_end
+(pLvSys)br.cond.sptk.few  ia64_work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
-.global __paravirt_pending_syscall_end;
-__paravirt_pending_syscall_end:
+.global ia64_work_pending_syscall_end;
+ia64_work_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
 	;;
 	ld8 r8=[r2]
 	ld8 r10=[r3]
-	br.cond.sptk.many __paravirt_work_processed_syscall_target
-END(__paravirt_leave_kernel)
+	br.cond.sptk.many ia64_work_processed_syscall
+END(ia64_leave_kernel)
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 ENTRY(handle_syscall_error)
 	/*
 	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1294,7 +1282,7 @@ ENTRY(sys_rt_sigreturn)
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_native_leave_kernel
+	mov.sptk b7=r8,ia64_leave_kernel
 	;;
 	mov ar.unat=r9
 	br.many b7
@@ -1782,4 +1770,3 @@ sys_call_table:
 	data8 sys_execveat
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
@@ -24,7 +24,7 @@
 #include <asm/unistd.h>
 
 #include "entry.h"
-#include "paravirt_inst.h"
+#include <asm/native/inst.h>
 
 /*
  * See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -402,7 +402,7 @@ ENTRY(fsys_fallback_syscall)
 	mov r26=ar.pfs
 END(fsys_fallback_syscall)
 	/* FALL THROUGH */
-GLOBAL_ENTRY(paravirt_fsys_bubble_down)
+GLOBAL_ENTRY(fsys_bubble_down)
 	.prologue
 	.altrp b6
 	.body
@@ -440,7 +440,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	 *
 	 *	PSR.BE : already is turned off in __kernel_syscall_via_epc()
 	 *	PSR.AC : don't care (kernel normally turns PSR.AC on)
-	 *	PSR.I  : already turned off by the time paravirt_fsys_bubble_down gets
+	 *	PSR.I  : already turned off by the time fsys_bubble_down gets
 	 *		 invoked
 	 *	PSR.DFL: always 0 (kernel never turns it on)
 	 *	PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -450,7 +450,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	 *	PSR.DB : don't care --- kernel never enables kernel-level
 	 *		 breakpoints
 	 *	PSR.TB : must be 0 already; if it wasn't zero on entry to
-	 *		 __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
+	 *		 __kernel_syscall_via_epc, the branch to fsys_bubble_down
 	 *		 will trigger a taken branch; the taken-trap-handler then
 	 *		 converts the syscall into a break-based system-call.
 	 */
@@ -541,14 +541,14 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	nop.m 0
 (p8)	br.call.sptk.many b6=b6			// B	(ignore return address)
 	br.cond.spnt ia64_trace_syscall		// B
-END(paravirt_fsys_bubble_down)
+END(fsys_bubble_down)
 
 	.rodata
 	.align 8
-	.globl paravirt_fsyscall_table
+	.globl fsyscall_table
 
-	data8 paravirt_fsys_bubble_down
-paravirt_fsyscall_table:
+	data8 fsys_bubble_down
+fsyscall_table:
 	data8 fsys_ni_syscall
 	data8 0				// exit			// 1025
 	data8 0				// read
@@ -833,4 +833,4 @@ paravirt_fsyscall_table:
 	// fill in zeros for the remaining entries
 .zero:
-	.space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
+	.space fsyscall_table + 8*NR_syscalls - .zero, 0
@@ -14,7 +14,7 @@
 #include <asm/unistd.h>
 #include <asm/kregs.h>
 #include <asm/page.h>
-#include "paravirt_inst.h"
+#include <asm/native/inst.h>
 
 /*
  * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
@@ -376,11 +376,4 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
 (p9)	mov r8=ENOSYS
 	FSYS_RETURN
 
-#ifdef CONFIG_PARAVIRT
-	/*
-	 * padd to make the size of this symbol constant
-	 * independent of paravirtualization.
-	 */
-	.align PAGE_SIZE / 8
-#endif
 END(__kernel_syscall_via_epc)
@@ -6,7 +6,6 @@
  */
 
 #include <asm/page.h>
-#include "paravirt_patchlist.h"
 
 SECTIONS
 {
@@ -33,21 +32,21 @@ SECTIONS
 	. = GATE_ADDR + 0x600;
 
 	.data..patch		: {
-		__paravirt_start_gate_mckinley_e9_patchlist = .;
+		__start_gate_mckinley_e9_patchlist = .;
 		*(.data..patch.mckinley_e9)
-		__paravirt_end_gate_mckinley_e9_patchlist = .;
+		__end_gate_mckinley_e9_patchlist = .;
 
-		__paravirt_start_gate_vtop_patchlist = .;
+		__start_gate_vtop_patchlist = .;
 		*(.data..patch.vtop)
-		__paravirt_end_gate_vtop_patchlist = .;
+		__end_gate_vtop_patchlist = .;
 
-		__paravirt_start_gate_fsyscall_patchlist = .;
+		__start_gate_fsyscall_patchlist = .;
 		*(.data..patch.fsyscall_table)
-		__paravirt_end_gate_fsyscall_patchlist = .;
+		__end_gate_fsyscall_patchlist = .;
 
-		__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
+		__start_gate_brl_fsys_bubble_down_patchlist = .;
 		*(.data..patch.brl_fsys_bubble_down)
-		__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
+		__end_gate_brl_fsys_bubble_down_patchlist = .;
 	} :readable
 
 	.IA_64.unwind_info	: { *(.IA_64.unwind_info*) }
...
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/pal.h> #include <asm/pal.h>
#include <asm/paravirt.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
...@@ -394,41 +393,6 @@ start_ap: ...@@ -394,41 +393,6 @@ start_ap:
;; ;;
(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
#ifdef CONFIG_PARAVIRT
movl r14=hypervisor_setup_hooks
movl r15=hypervisor_type
mov r16=num_hypervisor_hooks
;;
ld8 r2=[r15]
;;
cmp.ltu p7,p0=r2,r16 // array size check
shladd r8=r2,3,r14
;;
(p7) ld8 r9=[r8]
;;
(p7) mov b1=r9
(p7) cmp.ne.unc p7,p0=r9,r0 // no actual branch to NULL
;;
(p7) br.call.sptk.many rp=b1
__INITDATA
default_setup_hook = 0 // Currently nothing needs to be done.
.global hypervisor_type
hypervisor_type:
data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
// must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
hypervisor_setup_hooks:
data8 default_setup_hook
num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
.previous
#endif
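/*
* Illustrative C rendering (not part of the original file) of the hook
* dispatch the removed assembly above performed; all names below are
* hypothetical.
*/
typedef void (*setup_hook_t)(void);

static void run_setup_hook(unsigned long hv_type,
			   const setup_hook_t *hooks,
			   unsigned long num_hooks)
{
	if (hv_type >= num_hooks)	/* array size check, as in the cmp.ltu */
		return;
	if (hooks[hv_type] != NULL)	/* no actual branch to NULL */
		hooks[hv_type]();
}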
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
(isAP) br.call.sptk.many rp=start_secondary (isAP) br.call.sptk.many rp=start_secondary
.ret0: .ret0:
...@@ -1063,12 +1027,6 @@ GLOBAL_ENTRY(ia64_native_sched_clock) ...@@ -1063,12 +1027,6 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp br.ret.sptk.many rp
END(ia64_native_sched_clock) END(ia64_native_sched_clock)
#ifndef CONFIG_PARAVIRT
//unsigned long long
//sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
.global sched_clock
sched_clock = ia64_native_sched_clock
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
GLOBAL_ENTRY(cycle_to_cputime) GLOBAL_ENTRY(cycle_to_cputime)
......
...@@ -937,7 +937,6 @@ END(interrupt) ...@@ -937,7 +937,6 @@ END(interrupt)
* - ar.fpsr: set to kernel settings * - ar.fpsr: set to kernel settings
* - b6: preserved (same as on entry) * - b6: preserved (same as on entry)
*/ */
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
GLOBAL_ENTRY(ia64_syscall_setup) GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0 #if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs. # error This code assumes that b6 is the first field in pt_regs.
...@@ -1029,7 +1028,6 @@ GLOBAL_ENTRY(ia64_syscall_setup) ...@@ -1029,7 +1028,6 @@ GLOBAL_ENTRY(ia64_syscall_setup)
(p10) mov r8=-EINVAL (p10) mov r8=-EINVAL
br.ret.sptk.many b7 br.ret.sptk.many b7
END(ia64_syscall_setup) END(ia64_syscall_setup)
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
.org ia64_ivt+0x3c00 .org ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////
...@@ -1043,7 +1041,7 @@ END(ia64_syscall_setup) ...@@ -1043,7 +1041,7 @@ END(ia64_syscall_setup)
DBG_FAULT(16) DBG_FAULT(16)
FAULT(16) FAULT(16)
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
/* /*
* There is no particular reason for this code to be here, other than * There is no particular reason for this code to be here, other than
* that there happens to be space here that would go unused otherwise. * that there happens to be space here that would go unused otherwise.
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#include <asm/cache.h> #include <asm/cache.h>
#include "entry.h" #include "entry.h"
#include "paravirt_inst.h" #include <asm/native/inst.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */ /* read ar.itc in advance, and use it before leaving bank 0 */
......
...@@ -439,14 +439,6 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, ...@@ -439,14 +439,6 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
mod->arch.opd = s; mod->arch.opd = s;
else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
mod->arch.unwind = s; mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
else if (strcmp(".paravirt_bundles",
secstrings + s->sh_name) == 0)
mod->arch.paravirt_bundles = s;
else if (strcmp(".paravirt_insts",
secstrings + s->sh_name) == 0)
mod->arch.paravirt_insts = s;
#endif
if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
printk(KERN_ERR "%s: sections missing\n", mod->name); printk(KERN_ERR "%s: sections missing\n", mod->name);
...@@ -914,30 +906,6 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo ...@@ -914,30 +906,6 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
DEBUGP("%s: init: entry=%p\n", __func__, mod->init); DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind) if (mod->arch.unwind)
register_unwind_table(mod); register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
if (mod->arch.paravirt_bundles) {
struct paravirt_patch_site_bundle *start =
(struct paravirt_patch_site_bundle *)
mod->arch.paravirt_bundles->sh_addr;
struct paravirt_patch_site_bundle *end =
(struct paravirt_patch_site_bundle *)
(mod->arch.paravirt_bundles->sh_addr +
mod->arch.paravirt_bundles->sh_size);
paravirt_patch_apply_bundle(start, end);
}
if (mod->arch.paravirt_insts) {
struct paravirt_patch_site_inst *start =
(struct paravirt_patch_site_inst *)
mod->arch.paravirt_insts->sh_addr;
struct paravirt_patch_site_inst *end =
(struct paravirt_patch_site_inst *)
(mod->arch.paravirt_insts->sh_addr +
mod->arch.paravirt_insts->sh_size);
paravirt_patch_apply_inst(start, end);
}
#endif
return 0; return 0;
} }
......
/******************************************************************************
* arch/ia64/kernel/paravirt.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/iosapic.h>
#include <asm/paravirt.h>
/***************************************************************************
* general info
*/
struct pv_info pv_info = {
.kernel_rpl = 0,
.paravirt_enabled = 0,
.name = "bare hardware"
};
/***************************************************************************
* pv_init_ops
* initialization hooks.
*/
static void __init
ia64_native_patch_branch(unsigned long tag, unsigned long type);
struct pv_init_ops pv_init_ops =
{
#ifdef ASM_SUPPORTED
.patch_bundle = ia64_native_patch_bundle,
#endif
.patch_branch = ia64_native_patch_branch,
};
/***************************************************************************
* pv_cpu_ops
* intrinsics hooks.
*/
#ifndef ASM_SUPPORTED
/* ia64_native_xxx are macros, so we have to wrap them in real functions */
#define DEFINE_VOID_FUNC1(name) \
static void \
ia64_native_ ## name ## _func(unsigned long arg) \
{ \
ia64_native_ ## name(arg); \
}
#define DEFINE_VOID_FUNC1_VOID(name) \
static void \
ia64_native_ ## name ## _func(void *arg) \
{ \
ia64_native_ ## name(arg); \
}
#define DEFINE_VOID_FUNC2(name) \
static void \
ia64_native_ ## name ## _func(unsigned long arg0, \
unsigned long arg1) \
{ \
ia64_native_ ## name(arg0, arg1); \
}
#define DEFINE_FUNC0(name) \
static unsigned long \
ia64_native_ ## name ## _func(void) \
{ \
return ia64_native_ ## name(); \
}
#define DEFINE_FUNC1(name, type) \
static unsigned long \
ia64_native_ ## name ## _func(type arg) \
{ \
return ia64_native_ ## name(arg); \
}
DEFINE_VOID_FUNC1_VOID(fc);
DEFINE_VOID_FUNC1(intrin_local_irq_restore);
DEFINE_VOID_FUNC2(ptcga);
DEFINE_VOID_FUNC2(set_rr);
DEFINE_FUNC0(get_psr_i);
DEFINE_FUNC1(thash, unsigned long);
DEFINE_FUNC1(get_cpuid, int);
DEFINE_FUNC1(get_pmd, int);
DEFINE_FUNC1(get_rr, unsigned long);
static void
ia64_native_ssm_i_func(void)
{
ia64_native_ssm(IA64_PSR_I);
}
static void
ia64_native_rsm_i_func(void)
{
ia64_native_rsm(IA64_PSR_I);
}
static void
ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
unsigned long val2, unsigned long val3,
unsigned long val4)
{
ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
}
#define CASE_GET_REG(id) \
case _IA64_REG_ ## id: \
res = ia64_native_getreg(_IA64_REG_ ## id); \
break;
#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
unsigned long
ia64_native_getreg_func(int regnum)
{
unsigned long res = -1;
switch (regnum) {
CASE_GET_REG(GP);
/*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
CASE_GET_REG(PSR);
CASE_GET_REG(TP);
CASE_GET_REG(SP);
CASE_GET_AR(KR0);
CASE_GET_AR(KR1);
CASE_GET_AR(KR2);
CASE_GET_AR(KR3);
CASE_GET_AR(KR4);
CASE_GET_AR(KR5);
CASE_GET_AR(KR6);
CASE_GET_AR(KR7);
CASE_GET_AR(RSC);
CASE_GET_AR(BSP);
CASE_GET_AR(BSPSTORE);
CASE_GET_AR(RNAT);
CASE_GET_AR(FCR);
CASE_GET_AR(EFLAG);
CASE_GET_AR(CSD);
CASE_GET_AR(SSD);
CASE_GET_AR(CFLAG);
CASE_GET_AR(FSR);
CASE_GET_AR(FIR);
CASE_GET_AR(FDR);
CASE_GET_AR(CCV);
CASE_GET_AR(UNAT);
CASE_GET_AR(FPSR);
CASE_GET_AR(ITC);
CASE_GET_AR(PFS);
CASE_GET_AR(LC);
CASE_GET_AR(EC);
CASE_GET_CR(DCR);
CASE_GET_CR(ITM);
CASE_GET_CR(IVA);
CASE_GET_CR(PTA);
CASE_GET_CR(IPSR);
CASE_GET_CR(ISR);
CASE_GET_CR(IIP);
CASE_GET_CR(IFA);
CASE_GET_CR(ITIR);
CASE_GET_CR(IIPA);
CASE_GET_CR(IFS);
CASE_GET_CR(IIM);
CASE_GET_CR(IHA);
CASE_GET_CR(LID);
CASE_GET_CR(IVR);
CASE_GET_CR(TPR);
CASE_GET_CR(EOI);
CASE_GET_CR(IRR0);
CASE_GET_CR(IRR1);
CASE_GET_CR(IRR2);
CASE_GET_CR(IRR3);
CASE_GET_CR(ITV);
CASE_GET_CR(PMV);
CASE_GET_CR(CMCV);
CASE_GET_CR(LRR0);
CASE_GET_CR(LRR1);
default:
printk(KERN_CRIT "wrong getreg %d\n", regnum);
break;
}
return res;
}
#define CASE_SET_REG(id) \
case _IA64_REG_ ## id: \
ia64_native_setreg(_IA64_REG_ ## id, val); \
break;
#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
void
ia64_native_setreg_func(int regnum, unsigned long val)
{
switch (regnum) {
case _IA64_REG_PSR_L:
ia64_native_setreg(_IA64_REG_PSR_L, val);
ia64_dv_serialize_data();
break;
CASE_SET_REG(SP);
CASE_SET_REG(GP);
CASE_SET_AR(KR0);
CASE_SET_AR(KR1);
CASE_SET_AR(KR2);
CASE_SET_AR(KR3);
CASE_SET_AR(KR4);
CASE_SET_AR(KR5);
CASE_SET_AR(KR6);
CASE_SET_AR(KR7);
CASE_SET_AR(RSC);
CASE_SET_AR(BSP);
CASE_SET_AR(BSPSTORE);
CASE_SET_AR(RNAT);
CASE_SET_AR(FCR);
CASE_SET_AR(EFLAG);
CASE_SET_AR(CSD);
CASE_SET_AR(SSD);
CASE_SET_AR(CFLAG);
CASE_SET_AR(FSR);
CASE_SET_AR(FIR);
CASE_SET_AR(FDR);
CASE_SET_AR(CCV);
CASE_SET_AR(UNAT);
CASE_SET_AR(FPSR);
CASE_SET_AR(ITC);
CASE_SET_AR(PFS);
CASE_SET_AR(LC);
CASE_SET_AR(EC);
CASE_SET_CR(DCR);
CASE_SET_CR(ITM);
CASE_SET_CR(IVA);
CASE_SET_CR(PTA);
CASE_SET_CR(IPSR);
CASE_SET_CR(ISR);
CASE_SET_CR(IIP);
CASE_SET_CR(IFA);
CASE_SET_CR(ITIR);
CASE_SET_CR(IIPA);
CASE_SET_CR(IFS);
CASE_SET_CR(IIM);
CASE_SET_CR(IHA);
CASE_SET_CR(LID);
CASE_SET_CR(IVR);
CASE_SET_CR(TPR);
CASE_SET_CR(EOI);
CASE_SET_CR(IRR0);
CASE_SET_CR(IRR1);
CASE_SET_CR(IRR2);
CASE_SET_CR(IRR3);
CASE_SET_CR(ITV);
CASE_SET_CR(PMV);
CASE_SET_CR(CMCV);
CASE_SET_CR(LRR0);
CASE_SET_CR(LRR1);
default:
printk(KERN_CRIT "wrong setreg %d\n", regnum);
break;
}
}
#else
#define __DEFINE_FUNC(name, code) \
extern const char ia64_native_ ## name ## _direct_start[]; \
extern const char ia64_native_ ## name ## _direct_end[]; \
asm (".align 32\n" \
".proc ia64_native_" #name "_func\n" \
"ia64_native_" #name "_func:\n" \
"ia64_native_" #name "_direct_start:\n" \
code \
"ia64_native_" #name "_direct_end:\n" \
"br.cond.sptk.many b6\n" \
".endp ia64_native_" #name "_func\n")
#define DEFINE_VOID_FUNC0(name, code) \
extern void \
ia64_native_ ## name ## _func(void); \
__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC1(name, code) \
extern void \
ia64_native_ ## name ## _func(unsigned long arg); \
__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC1_VOID(name, code) \
extern void \
ia64_native_ ## name ## _func(void *arg); \
__DEFINE_FUNC(name, code)
#define DEFINE_VOID_FUNC2(name, code) \
extern void \
ia64_native_ ## name ## _func(unsigned long arg0, \
unsigned long arg1); \
__DEFINE_FUNC(name, code)
#define DEFINE_FUNC0(name, code) \
extern unsigned long \
ia64_native_ ## name ## _func(void); \
__DEFINE_FUNC(name, code)
#define DEFINE_FUNC1(name, type, code) \
extern unsigned long \
ia64_native_ ## name ## _func(type arg); \
__DEFINE_FUNC(name, code)
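/*
* Illustrative expansion (not in the original source): with the macros
* above, the DEFINE_VOID_FUNC2(ptcga, "ptc.ga r8, r9\n") use below emits
* roughly the following.  The *_direct_start/_direct_end labels bracket
* the instructions that the bundle patcher copies over a patch site.
*/
extern void ia64_native_ptcga_func(unsigned long arg0, unsigned long arg1);
extern const char ia64_native_ptcga_direct_start[];
extern const char ia64_native_ptcga_direct_end[];
asm (".align 32\n"
	".proc ia64_native_ptcga_func\n"
	"ia64_native_ptcga_func:\n"
	"ia64_native_ptcga_direct_start:\n"
	"ptc.ga r8, r9\n"
	"ia64_native_ptcga_direct_end:\n"
	"br.cond.sptk.many b6\n"
	".endp ia64_native_ptcga_func\n");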
DEFINE_VOID_FUNC1_VOID(fc,
"fc r8\n");
DEFINE_VOID_FUNC1(intrin_local_irq_restore,
";;\n"
" cmp.ne p6, p7 = r8, r0\n"
";;\n"
"(p6) ssm psr.i\n"
"(p7) rsm psr.i\n"
";;\n"
"(p6) srlz.d\n");
DEFINE_VOID_FUNC2(ptcga,
"ptc.ga r8, r9\n");
DEFINE_VOID_FUNC2(set_rr,
"mov rr[r8] = r9\n");
/* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */
DEFINE_FUNC0(get_psr_i,
"mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n"
"mov r8 = psr\n"
";;\n"
"and r8 = r2, r8\n");
DEFINE_FUNC1(thash, unsigned long,
"thash r8 = r8\n");
DEFINE_FUNC1(get_cpuid, int,
"mov r8 = cpuid[r8]\n");
DEFINE_FUNC1(get_pmd, int,
"mov r8 = pmd[r8]\n");
DEFINE_FUNC1(get_rr, unsigned long,
"mov r8 = rr[r8]\n");
DEFINE_VOID_FUNC0(ssm_i,
"ssm psr.i\n");
DEFINE_VOID_FUNC0(rsm_i,
"rsm psr.i\n");
extern void
ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
unsigned long val2, unsigned long val3,
unsigned long val4);
__DEFINE_FUNC(set_rr0_to_rr4,
"mov rr[r0] = r8\n"
"movl r2 = 0x2000000000000000\n"
";;\n"
"mov rr[r2] = r9\n"
"shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */
";;\n"
"add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */
"mov rr[r3] = r10\n"
";;\n"
"mov rr[r2] = r11\n"
"shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */
";;\n"
"mov rr[r3] = r14\n");
extern unsigned long ia64_native_getreg_func(int regnum);
asm(".global ia64_native_getreg_func\n");
#define __DEFINE_GET_REG(id, reg) \
"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
";;\n" \
"cmp.eq p6, p0 = r2, r8\n" \
";;\n" \
"(p6) mov r8 = " #reg "\n" \
"(p6) br.cond.sptk.many b6\n" \
";;\n"
#define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg)
#define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg)
__DEFINE_FUNC(getreg,
__DEFINE_GET_REG(GP, gp)
/*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */
__DEFINE_GET_REG(PSR, psr)
__DEFINE_GET_REG(TP, tp)
__DEFINE_GET_REG(SP, sp)
__DEFINE_GET_REG(AR_KR0, ar0)
__DEFINE_GET_REG(AR_KR1, ar1)
__DEFINE_GET_REG(AR_KR2, ar2)
__DEFINE_GET_REG(AR_KR3, ar3)
__DEFINE_GET_REG(AR_KR4, ar4)
__DEFINE_GET_REG(AR_KR5, ar5)
__DEFINE_GET_REG(AR_KR6, ar6)
__DEFINE_GET_REG(AR_KR7, ar7)
__DEFINE_GET_AR(RSC, rsc)
__DEFINE_GET_AR(BSP, bsp)
__DEFINE_GET_AR(BSPSTORE, bspstore)
__DEFINE_GET_AR(RNAT, rnat)
__DEFINE_GET_AR(FCR, fcr)
__DEFINE_GET_AR(EFLAG, eflag)
__DEFINE_GET_AR(CSD, csd)
__DEFINE_GET_AR(SSD, ssd)
__DEFINE_GET_REG(AR_CFLAG, ar27)
__DEFINE_GET_AR(FSR, fsr)
__DEFINE_GET_AR(FIR, fir)
__DEFINE_GET_AR(FDR, fdr)
__DEFINE_GET_AR(CCV, ccv)
__DEFINE_GET_AR(UNAT, unat)
__DEFINE_GET_AR(FPSR, fpsr)
__DEFINE_GET_AR(ITC, itc)
__DEFINE_GET_AR(PFS, pfs)
__DEFINE_GET_AR(LC, lc)
__DEFINE_GET_AR(EC, ec)
__DEFINE_GET_CR(DCR, dcr)
__DEFINE_GET_CR(ITM, itm)
__DEFINE_GET_CR(IVA, iva)
__DEFINE_GET_CR(PTA, pta)
__DEFINE_GET_CR(IPSR, ipsr)
__DEFINE_GET_CR(ISR, isr)
__DEFINE_GET_CR(IIP, iip)
__DEFINE_GET_CR(IFA, ifa)
__DEFINE_GET_CR(ITIR, itir)
__DEFINE_GET_CR(IIPA, iipa)
__DEFINE_GET_CR(IFS, ifs)
__DEFINE_GET_CR(IIM, iim)
__DEFINE_GET_CR(IHA, iha)
__DEFINE_GET_CR(LID, lid)
__DEFINE_GET_CR(IVR, ivr)
__DEFINE_GET_CR(TPR, tpr)
__DEFINE_GET_CR(EOI, eoi)
__DEFINE_GET_CR(IRR0, irr0)
__DEFINE_GET_CR(IRR1, irr1)
__DEFINE_GET_CR(IRR2, irr2)
__DEFINE_GET_CR(IRR3, irr3)
__DEFINE_GET_CR(ITV, itv)
__DEFINE_GET_CR(PMV, pmv)
__DEFINE_GET_CR(CMCV, cmcv)
__DEFINE_GET_CR(LRR0, lrr0)
__DEFINE_GET_CR(LRR1, lrr1)
"mov r8 = -1\n" /* unsupported case */
);
extern void ia64_native_setreg_func(int regnum, unsigned long val);
asm(".global ia64_native_setreg_func\n");
#define __DEFINE_SET_REG(id, reg) \
"mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
";;\n" \
"cmp.eq p6, p0 = r2, r9\n" \
";;\n" \
"(p6) mov " #reg " = r8\n" \
"(p6) br.cond.sptk.many b6\n" \
";;\n"
#define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg)
#define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg)
__DEFINE_FUNC(setreg,
"mov r2 = " __stringify(_IA64_REG_PSR_L) "\n"
";;\n"
"cmp.eq p6, p0 = r2, r9\n"
";;\n"
"(p6) mov psr.l = r8\n"
#ifdef HAVE_SERIALIZE_DIRECTIVE
".serialize.data\n"
#endif
"(p6) br.cond.sptk.many b6\n"
__DEFINE_SET_REG(GP, gp)
__DEFINE_SET_REG(SP, sp)
__DEFINE_SET_REG(AR_KR0, ar0)
__DEFINE_SET_REG(AR_KR1, ar1)
__DEFINE_SET_REG(AR_KR2, ar2)
__DEFINE_SET_REG(AR_KR3, ar3)
__DEFINE_SET_REG(AR_KR4, ar4)
__DEFINE_SET_REG(AR_KR5, ar5)
__DEFINE_SET_REG(AR_KR6, ar6)
__DEFINE_SET_REG(AR_KR7, ar7)
__DEFINE_SET_AR(RSC, rsc)
__DEFINE_SET_AR(BSP, bsp)
__DEFINE_SET_AR(BSPSTORE, bspstore)
__DEFINE_SET_AR(RNAT, rnat)
__DEFINE_SET_AR(FCR, fcr)
__DEFINE_SET_AR(EFLAG, eflag)
__DEFINE_SET_AR(CSD, csd)
__DEFINE_SET_AR(SSD, ssd)
__DEFINE_SET_REG(AR_CFLAG, ar27)
__DEFINE_SET_AR(FSR, fsr)
__DEFINE_SET_AR(FIR, fir)
__DEFINE_SET_AR(FDR, fdr)
__DEFINE_SET_AR(CCV, ccv)
__DEFINE_SET_AR(UNAT, unat)
__DEFINE_SET_AR(FPSR, fpsr)
__DEFINE_SET_AR(ITC, itc)
__DEFINE_SET_AR(PFS, pfs)
__DEFINE_SET_AR(LC, lc)
__DEFINE_SET_AR(EC, ec)
__DEFINE_SET_CR(DCR, dcr)
__DEFINE_SET_CR(ITM, itm)
__DEFINE_SET_CR(IVA, iva)
__DEFINE_SET_CR(PTA, pta)
__DEFINE_SET_CR(IPSR, ipsr)
__DEFINE_SET_CR(ISR, isr)
__DEFINE_SET_CR(IIP, iip)
__DEFINE_SET_CR(IFA, ifa)
__DEFINE_SET_CR(ITIR, itir)
__DEFINE_SET_CR(IIPA, iipa)
__DEFINE_SET_CR(IFS, ifs)
__DEFINE_SET_CR(IIM, iim)
__DEFINE_SET_CR(IHA, iha)
__DEFINE_SET_CR(LID, lid)
__DEFINE_SET_CR(IVR, ivr)
__DEFINE_SET_CR(TPR, tpr)
__DEFINE_SET_CR(EOI, eoi)
__DEFINE_SET_CR(IRR0, irr0)
__DEFINE_SET_CR(IRR1, irr1)
__DEFINE_SET_CR(IRR2, irr2)
__DEFINE_SET_CR(IRR3, irr3)
__DEFINE_SET_CR(ITV, itv)
__DEFINE_SET_CR(PMV, pmv)
__DEFINE_SET_CR(CMCV, cmcv)
__DEFINE_SET_CR(LRR0, lrr0)
__DEFINE_SET_CR(LRR1, lrr1)
);
#endif
struct pv_cpu_ops pv_cpu_ops = {
.fc = ia64_native_fc_func,
.thash = ia64_native_thash_func,
.get_cpuid = ia64_native_get_cpuid_func,
.get_pmd = ia64_native_get_pmd_func,
.ptcga = ia64_native_ptcga_func,
.get_rr = ia64_native_get_rr_func,
.set_rr = ia64_native_set_rr_func,
.set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
.ssm_i = ia64_native_ssm_i_func,
.getreg = ia64_native_getreg_func,
.setreg = ia64_native_setreg_func,
.rsm_i = ia64_native_rsm_i_func,
.get_psr_i = ia64_native_get_psr_i_func,
.intrin_local_irq_restore
= ia64_native_intrin_local_irq_restore_func,
};
EXPORT_SYMBOL(pv_cpu_ops);
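/*
* Illustrative only (hypothetical names, no such hypervisor in-tree): a
* paravirtualized port would have overridden entries in this ops table
* during early boot, e.g.:
*/
static unsigned long my_hv_get_psr_i(void)
{
	return 0;	/* pretend PSR.i is always clear */
}

static void __init my_hv_setup(void)
{
	pv_cpu_ops.get_psr_i = my_hv_get_psr_i;
}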
/******************************************************************************
* replacement of hand-written assembly code.
*/
void
paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
{
extern unsigned long paravirt_switch_to_targ;
extern unsigned long paravirt_leave_syscall_targ;
extern unsigned long paravirt_work_processed_syscall_targ;
extern unsigned long paravirt_leave_kernel_targ;
paravirt_switch_to_targ = cpu_asm_switch->switch_to;
paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
paravirt_work_processed_syscall_targ =
cpu_asm_switch->work_processed_syscall;
paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
}
/***************************************************************************
* pv_iosapic_ops
* iosapic read/write hooks.
*/
static unsigned int
ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
return __ia64_native_iosapic_read(iosapic, reg);
}
static void
ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
__ia64_native_iosapic_write(iosapic, reg, val);
}
struct pv_iosapic_ops pv_iosapic_ops = {
.pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
.__get_irq_chip = ia64_native_iosapic_get_irq_chip,
.__read = ia64_native_iosapic_read,
.__write = ia64_native_iosapic_write,
};
/***************************************************************************
* pv_irq_ops
* irq operations
*/
struct pv_irq_ops pv_irq_ops = {
.register_ipi = ia64_native_register_ipi,
.assign_irq_vector = ia64_native_assign_irq_vector,
.free_irq_vector = ia64_native_free_irq_vector,
.register_percpu_irq = ia64_native_register_percpu_irq,
.resend_irq = ia64_native_resend_irq,
};
/***************************************************************************
* pv_time_ops
* time operations
*/
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static int
ia64_native_do_steal_accounting(unsigned long *new_itm)
{
return 0;
}
struct pv_time_ops pv_time_ops = {
.do_steal_accounting = ia64_native_do_steal_accounting,
.sched_clock = ia64_native_sched_clock,
};
/***************************************************************************
* binary patching
* pv_init_ops.patch_bundle
*/
#ifdef ASM_SUPPORTED
#define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \
__DEFINE_FUNC(get_ ## name, \
";;\n" \
"mov r8 = " #reg "\n" \
";;\n")
#define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
__DEFINE_FUNC(set_ ## name, \
";;\n" \
"mov " #reg " = r8\n" \
";;\n")
#define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \
IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \
IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg)
#define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \
IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg)
#define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \
IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg)
IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr);
IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp);
/* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */
__DEFINE_FUNC(set_psr_l,
";;\n"
"mov psr.l = r8\n"
#ifdef HAVE_SERIALIZE_DIRECTIVE
".serialize.data\n"
#endif
";;\n");
IA64_NATIVE_PATCH_DEFINE_REG(gp, gp);
IA64_NATIVE_PATCH_DEFINE_REG(sp, sp);
IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0);
IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1);
IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2);
IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3);
IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4);
IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5);
IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6);
IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7);
IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc);
IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp);
IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore);
IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat);
IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr);
IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag);
IA64_NATIVE_PATCH_DEFINE_AR(csd, csd);
IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd);
IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27);
IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr);
IA64_NATIVE_PATCH_DEFINE_AR(fir, fir);
IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr);
IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv);
IA64_NATIVE_PATCH_DEFINE_AR(unat, unat);
IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr);
IA64_NATIVE_PATCH_DEFINE_AR(itc, itc);
IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs);
IA64_NATIVE_PATCH_DEFINE_AR(lc, lc);
IA64_NATIVE_PATCH_DEFINE_AR(ec, ec);
IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr);
IA64_NATIVE_PATCH_DEFINE_CR(itm, itm);
IA64_NATIVE_PATCH_DEFINE_CR(iva, iva);
IA64_NATIVE_PATCH_DEFINE_CR(pta, pta);
IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr);
IA64_NATIVE_PATCH_DEFINE_CR(isr, isr);
IA64_NATIVE_PATCH_DEFINE_CR(iip, iip);
IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa);
IA64_NATIVE_PATCH_DEFINE_CR(itir, itir);
IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa);
IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs);
IA64_NATIVE_PATCH_DEFINE_CR(iim, iim);
IA64_NATIVE_PATCH_DEFINE_CR(iha, iha);
IA64_NATIVE_PATCH_DEFINE_CR(lid, lid);
IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr);
IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr);
IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi);
IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0);
IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1);
IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2);
IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3);
IA64_NATIVE_PATCH_DEFINE_CR(itv, itv);
IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv);
IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv);
IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0);
IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1);
static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[]
__initdata_or_module =
{
#define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \
{ \
(void*)ia64_native_ ## name ## _direct_start, \
(void*)ia64_native_ ## name ## _direct_end, \
PARAVIRT_PATCH_TYPE_ ## type, \
}
IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC),
IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH),
IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore,
INTRIN_LOCAL_IRQ_RESTORE),
#define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
{ \
(void*)ia64_native_get_ ## name ## _direct_start, \
(void*)ia64_native_get_ ## name ## _direct_end, \
PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
}
#define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
{ \
(void*)ia64_native_set_ ## name ## _direct_start, \
(void*)ia64_native_set_ ## name ## _direct_end, \
PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
}
#define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \
IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \
IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg)
#define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg)
#define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg)
IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP),
IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD),
IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC),
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0),
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1),
};
unsigned long __init_or_module
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
{
const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) /
sizeof(ia64_native_patch_bundle_elems[0]);
return __paravirt_patch_apply_bundle(sbundle, ebundle, type,
ia64_native_patch_bundle_elems,
nelems, NULL);
}
#endif /* ASM_SUPPORTED */
extern const char ia64_native_switch_to[];
extern const char ia64_native_leave_syscall[];
extern const char ia64_native_work_processed_syscall[];
extern const char ia64_native_leave_kernel[];
const struct paravirt_patch_branch_target ia64_native_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type) \
{ \
ia64_native_ ## name, \
PARAVIRT_PATCH_TYPE_BR_ ## type, \
}
PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};
static void __init
ia64_native_patch_branch(unsigned long tag, unsigned long type)
{
const unsigned long nelem =
sizeof(ia64_native_branch_target) /
sizeof(ia64_native_branch_target[0]);
__paravirt_patch_apply_branch(tag, type,
ia64_native_branch_target, nelem);
}
/******************************************************************************
* linux/arch/ia64/kernel/paravirt_inst.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
#include <asm/native/pvchk_inst.h>
#else
#include <asm/native/inst.h>
#endif
/******************************************************************************
* linux/arch/ia64/kernel/paravirt_patch.c
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <asm/intrinsics.h>
#include <asm/kprobes.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
typedef union ia64_inst {
struct {
unsigned long long qp : 6;
unsigned long long : 31;
unsigned long long opcode : 4;
unsigned long long reserved : 23;
} generic;
unsigned long long l;
} ia64_inst_t;
/*
* flush_icache_range() can't be used here: this runs before
* cpu_init(), which initializes ia64_i_cache_stride_shift, and
* flush_icache_range() relies on that value.
*/
void __init_or_module
paravirt_flush_i_cache_range(const void *instr, unsigned long size)
{
extern void paravirt_fc_i(const void *addr);
unsigned long i;
for (i = 0; i < size; i += sizeof(bundle_t))
paravirt_fc_i(instr + i);
}
bundle_t* __init_or_module
paravirt_get_bundle(unsigned long tag)
{
return (bundle_t *)(tag & ~3UL);
}
unsigned long __init_or_module
paravirt_get_slot(unsigned long tag)
{
return tag & 3UL;
}
unsigned long __init_or_module
paravirt_get_num_inst(unsigned long stag, unsigned long etag)
{
bundle_t *sbundle = paravirt_get_bundle(stag);
unsigned long sslot = paravirt_get_slot(stag);
bundle_t *ebundle = paravirt_get_bundle(etag);
unsigned long eslot = paravirt_get_slot(etag);
return (ebundle - sbundle) * 3 + eslot - sslot + 1;
}
unsigned long __init_or_module
paravirt_get_next_tag(unsigned long tag)
{
unsigned long slot = paravirt_get_slot(tag);
switch (slot) {
case 0:
case 1:
return tag + 1;
case 2: {
bundle_t *bundle = paravirt_get_bundle(tag);
return (unsigned long)(bundle + 1);
}
default:
BUG();
}
/* NOTREACHED */
}
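/*
* Worked example (not in the original source): a patch-site tag packs a
* 16-byte-aligned bundle address and a slot number (0..2) into one word,
* tag = bundle | slot.  Assuming a bundle at the illustrative address
* 0x1000:
*
*	paravirt_get_bundle(0x1001) == (bundle_t *)0x1000, slot 1
*	paravirt_get_next_tag(0x1002) == 0x1010  (slot 2 wraps to the next bundle)
*	paravirt_get_num_inst(0x1001, 0x1012) == 1 * 3 + 2 - 1 + 1 == 5
*/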
ia64_inst_t __init_or_module
paravirt_read_slot0(const bundle_t *bundle)
{
ia64_inst_t inst;
inst.l = bundle->quad0.slot0;
return inst;
}
ia64_inst_t __init_or_module
paravirt_read_slot1(const bundle_t *bundle)
{
ia64_inst_t inst;
inst.l = bundle->quad0.slot1_p0 |
((unsigned long long)bundle->quad1.slot1_p1 << 18UL);
return inst;
}
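/*
* Why slot 1 is read in two pieces (explanatory note, not in the original
* source): an IA-64 bundle is 128 bits, laid out as a 5-bit template
* followed by three 41-bit slots.  Slot 1 starts at bit 46, so its low
* 18 bits live in the first 64-bit quad and its high 23 bits in the
* second; hence the << 18UL above and the matching split in
* paravirt_write_slot1() below.
*/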
ia64_inst_t __init_or_module
paravirt_read_slot2(const bundle_t *bundle)
{
ia64_inst_t inst;
inst.l = bundle->quad1.slot2;
return inst;
}
ia64_inst_t __init_or_module
paravirt_read_inst(unsigned long tag)
{
bundle_t *bundle = paravirt_get_bundle(tag);
unsigned long slot = paravirt_get_slot(tag);
switch (slot) {
case 0:
return paravirt_read_slot0(bundle);
case 1:
return paravirt_read_slot1(bundle);
case 2:
return paravirt_read_slot2(bundle);
default:
BUG();
}
/* NOTREACHED */
}
void __init_or_module
paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst)
{
bundle->quad0.slot0 = inst.l;
}
void __init_or_module
paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst)
{
bundle->quad0.slot1_p0 = inst.l;
bundle->quad1.slot1_p1 = inst.l >> 18UL;
}
void __init_or_module
paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst)
{
bundle->quad1.slot2 = inst.l;
}
void __init_or_module
paravirt_write_inst(unsigned long tag, ia64_inst_t inst)
{
bundle_t *bundle = paravirt_get_bundle(tag);
unsigned long slot = paravirt_get_slot(tag);
switch (slot) {
case 0:
paravirt_write_slot0(bundle, inst);
break;
case 1:
paravirt_write_slot1(bundle, inst);
break;
case 2:
paravirt_write_slot2(bundle, inst);
break;
default:
BUG();
break;
}
paravirt_flush_i_cache_range(bundle, sizeof(*bundle));
}
/* for debug */
void
paravirt_print_bundle(const bundle_t *bundle)
{
const unsigned long *quad = (const unsigned long *)bundle;
ia64_inst_t slot0 = paravirt_read_slot0(bundle);
ia64_inst_t slot1 = paravirt_read_slot1(bundle);
ia64_inst_t slot2 = paravirt_read_slot2(bundle);
printk(KERN_DEBUG
"bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]);
printk(KERN_DEBUG
"bundle template 0x%x\n",
bundle->quad0.template);
printk(KERN_DEBUG
"slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n",
(unsigned long)bundle->quad0.slot0,
(unsigned long)bundle->quad0.slot1_p0,
(unsigned long)bundle->quad1.slot1_p1,
(unsigned long)bundle->quad1.slot2);
printk(KERN_DEBUG
"slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n",
slot0.l, slot1.l, slot2.l);
}
static int noreplace_paravirt __init_or_module = 0;
static int __init setup_noreplace_paravirt(char *str)
{
noreplace_paravirt = 1;
return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#ifdef ASM_SUPPORTED
static void __init_or_module
fill_nop_bundle(void *sbundle, void *ebundle)
{
extern const char paravirt_nop_bundle[];
extern const unsigned long paravirt_nop_bundle_size;
void *bundle = sbundle;
BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
while (bundle < ebundle) {
memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size);
bundle += paravirt_nop_bundle_size;
}
}
/* helper function */
unsigned long __init_or_module
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
const struct paravirt_patch_bundle_elem *elems,
unsigned long nelems,
const struct paravirt_patch_bundle_elem **found)
{
unsigned long used = 0;
unsigned long i;
BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
if (found != NULL)
	*found = NULL;
for (i = 0; i < nelems; i++) {
const struct paravirt_patch_bundle_elem *p = &elems[i];
if (p->type == type) {
unsigned long need = p->ebundle - p->sbundle;
unsigned long room = ebundle - sbundle;
if (found != NULL)
*found = p;
if (room < need) {
/* no room to replace; skip it */
printk(KERN_DEBUG
"not enough room to patch bundles: "
"type %ld need %ld room %ld\n",
type, need, room);
break;
}
used = need;
memcpy(sbundle, p->sbundle, used);
break;
}
}
return used;
}
void __init_or_module
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
const struct paravirt_patch_site_bundle *end)
{
const struct paravirt_patch_site_bundle *p;
if (noreplace_paravirt)
return;
if (pv_init_ops.patch_bundle == NULL)
return;
for (p = start; p < end; p++) {
unsigned long used;
used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle,
p->type);
if (used == 0)
continue;
fill_nop_bundle(p->sbundle + used, p->ebundle);
paravirt_flush_i_cache_range(p->sbundle,
p->ebundle - p->sbundle);
}
ia64_sync_i();
ia64_srlz_i();
}
/*
* nop.i, nop.m and nop.f instructions share the same format,
* but nop.b has a different format.
* nop.b is not supported for now.
*/
static void __init_or_module
fill_nop_inst(unsigned long stag, unsigned long etag)
{
extern const bundle_t paravirt_nop_mfi_inst_bundle[];
unsigned long tag;
const ia64_inst_t nop_inst =
paravirt_read_slot0(paravirt_nop_mfi_inst_bundle);
for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag))
paravirt_write_inst(tag, nop_inst);
}
void __init_or_module
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
const struct paravirt_patch_site_inst *end)
{
const struct paravirt_patch_site_inst *p;
if (noreplace_paravirt)
return;
if (pv_init_ops.patch_inst == NULL)
return;
for (p = start; p < end; p++) {
unsigned long tag;
bundle_t *sbundle;
bundle_t *ebundle;
tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type);
if (tag == p->stag)
continue;
fill_nop_inst(tag, p->etag);
sbundle = paravirt_get_bundle(p->stag);
ebundle = paravirt_get_bundle(p->etag) + 1;
paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) *
sizeof(bundle_t));
}
ia64_sync_i();
ia64_srlz_i();
}
#endif /* ASM_SUPPORTED */
/* brl.cond.sptk.many <target64> X3 */
typedef union inst_x3_op {
ia64_inst_t inst;
struct {
unsigned long qp: 6;
unsigned long btyp: 3;
unsigned long unused: 3;
unsigned long p: 1;
unsigned long imm20b: 20;
unsigned long wh: 2;
unsigned long d: 1;
unsigned long i: 1;
unsigned long opcode: 4;
};
unsigned long l;
} inst_x3_op_t;
typedef union inst_x3_imm {
ia64_inst_t inst;
struct {
unsigned long unused: 2;
unsigned long imm39: 39;
};
unsigned long l;
} inst_x3_imm_t;
void __init_or_module
paravirt_patch_reloc_brl(unsigned long tag, const void *target)
{
unsigned long tag_op = paravirt_get_next_tag(tag);
unsigned long tag_imm = tag;
bundle_t *bundle = paravirt_get_bundle(tag);
ia64_inst_t inst_op = paravirt_read_inst(tag_op);
ia64_inst_t inst_imm = paravirt_read_inst(tag_imm);
inst_x3_op_t inst_x3_op = { .l = inst_op.l };
inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l };
unsigned long imm60 =
((unsigned long)target - (unsigned long)bundle) >> 4;
BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */
BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
/* imm60[59] 1bit */
inst_x3_op.i = (imm60 >> 59) & 1;
/* imm60[19:0] 20bit */
inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1);
/* imm60[58:20] 39bit */
inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1);
inst_op.l = inst_x3_op.l;
inst_imm.l = inst_x3_imm.l;
paravirt_write_inst(tag_op, inst_op);
paravirt_write_inst(tag_imm, inst_imm);
}
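/*
* Hypothetical helper (not in the original source) showing the inverse of
* the three stores above: imm60 = i:imm39:imm20b, and the branch target
* is bundle + (imm60 << 4).
*/
static inline unsigned long brl_decode_imm60(inst_x3_op_t op,
					     inst_x3_imm_t imm)
{
	return ((unsigned long)op.i << 59) |
	       ((unsigned long)imm.imm39 << 20) |
	       (unsigned long)op.imm20b;
}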
/* br.cond.sptk.many <target25> B1 */
typedef union inst_b1 {
ia64_inst_t inst;
struct {
unsigned long qp: 6;
unsigned long btype: 3;
unsigned long unused: 3;
unsigned long p: 1;
unsigned long imm20b: 20;
unsigned long wh: 2;
unsigned long d: 1;
unsigned long s: 1;
unsigned long opcode: 4;
};
unsigned long l;
} inst_b1_t;
void __init
paravirt_patch_reloc_br(unsigned long tag, const void *target)
{
bundle_t *bundle = paravirt_get_bundle(tag);
ia64_inst_t inst = paravirt_read_inst(tag);
unsigned long target25 = (unsigned long)target - (unsigned long)bundle;
inst_b1_t inst_b1;
BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
inst_b1.l = inst.l;
if (target25 & (1UL << 63))
inst_b1.s = 1;
else
inst_b1.s = 0;
inst_b1.imm20b = target25 >> 4;
inst.l = inst_b1.l;
paravirt_write_inst(tag, inst);
}
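/*
* Worked example (illustrative): a forward branch 0x240 bytes from the
* bundle gives target25 = 0x240, so imm20b = 0x240 >> 4 = 0x24 and s = 0;
* a backward branch makes target25 negative (bit 63 set), so s = 1.
*/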
void __init
__paravirt_patch_apply_branch(
unsigned long tag, unsigned long type,
const struct paravirt_patch_branch_target *entries,
unsigned int nr_entries)
{
unsigned int i;
for (i = 0; i < nr_entries; i++) {
if (entries[i].type == type) {
paravirt_patch_reloc_br(tag, entries[i].entry);
break;
}
}
}
static void __init
paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start,
const struct paravirt_patch_site_branch *end)
{
const struct paravirt_patch_site_branch *p;
if (noreplace_paravirt)
return;
if (pv_init_ops.patch_branch == NULL)
return;
for (p = start; p < end; p++)
(*pv_init_ops.patch_branch)(p->tag, p->type);
ia64_sync_i();
ia64_srlz_i();
}
void __init
paravirt_patch_apply(void)
{
extern const char __start_paravirt_bundles[];
extern const char __stop_paravirt_bundles[];
extern const char __start_paravirt_insts[];
extern const char __stop_paravirt_insts[];
extern const char __start_paravirt_branches[];
extern const char __stop_paravirt_branches[];
paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *)
__start_paravirt_bundles,
(const struct paravirt_patch_site_bundle *)
__stop_paravirt_bundles);
paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *)
__start_paravirt_insts,
(const struct paravirt_patch_site_inst *)
__stop_paravirt_insts);
paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *)
__start_paravirt_branches,
(const struct paravirt_patch_site_branch *)
__stop_paravirt_branches);
}
/*
* Local variables:
* mode: C
* c-set-style: "linux"
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*/
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/paravirt.h>
#define DECLARE(name) \
extern unsigned long \
__ia64_native_start_gate_##name##_patchlist[]; \
extern unsigned long \
__ia64_native_end_gate_##name##_patchlist[]
DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);
extern unsigned long __start_gate_section[];
#define ASSIGN(name) \
.start_##name##_patchlist = \
(unsigned long)__ia64_native_start_gate_##name##_patchlist, \
.end_##name##_patchlist = \
(unsigned long)__ia64_native_end_gate_##name##_patchlist
struct pv_patchdata pv_patchdata __initdata = {
ASSIGN(fsyscall),
ASSIGN(brl_fsys_bubble_down),
ASSIGN(vtop),
ASSIGN(mckinley_e9),
.gate_section = (void*)__start_gate_section,
};
unsigned long __init
paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
{
#define CASE(NAME, name) \
case PV_GATE_START_##NAME: \
return pv_patchdata.start_##name##_patchlist; \
case PV_GATE_END_##NAME: \
return pv_patchdata.end_##name##_patchlist;
switch (type) {
CASE(FSYSCALL, fsyscall);
CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
CASE(VTOP, vtop);
CASE(MCKINLEY_E9, mckinley_e9);
default:
BUG();
break;
}
return 0;
}
void * __init
paravirt_get_gate_section(void)
{
return pv_patchdata.gate_section;
}
/******************************************************************************
* linux/arch/ia64/kernel/paravirt_patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/native/patchlist.h>
/******************************************************************************
* linux/arch/ia64/kernel/paravirtentry.S
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <asm/asmmacro.h>
#include <asm/asm-offsets.h>
#include <asm/paravirt_privop.h>
#include <asm/paravirt_patch.h>
#include "entry.h"
#define DATA8(sym, init_value) \
.pushsection .data..read_mostly ; \
.align 8 ; \
.global sym ; \
sym: ; \
data8 init_value ; \
.popsection
#define BRANCH(targ, reg, breg, type) \
PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
;; \
movl reg=targ ; \
;; \
ld8 reg=[reg] ; \
;; \
mov breg=reg ; \
br.cond.sptk.many breg
#define BRANCH_PROC(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \
PT_REGS_UNWIND_INFO(0) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
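/*
* Illustrative C analogy (hypothetical names, not part of this file):
* each BRANCH_PROC above builds a trampoline that jumps through a
* per-symbol data word, the word that paravirt_cpu_asm_init() repoints
* at a hypervisor's implementation:
*
*	void (*switch_to_targ)(void) = native_switch_to;
*
*	void paravirt_switch_to(void)
*	{
*		switch_to_targ();	(the movl/ld8/mov b7/br above)
*	}
*/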
#ifdef CONFIG_MODULES
#define __INIT_OR_MODULE .text
#define __INITDATA_OR_MODULE .data
#else
#define __INIT_OR_MODULE __INIT
#define __INITDATA_OR_MODULE __INITDATA
#endif /* CONFIG_MODULES */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_fc_i)
fc.i r32
br.ret.sptk.many rp
END(paravirt_fc_i)
__FINIT
__INIT_OR_MODULE
.align 32
GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
{
nop.b 0
nop.b 0
nop.b 0
}
END(paravirt_nop_b_inst_bundle)
__FINIT
/* NOTE: nop.[mfi] has same format */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
{
nop.m 0
nop.f 0
nop.i 0
}
END(paravirt_nop_mfi_inst_bundle)
__FINIT
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_bundle)
paravirt_nop_bundle_start:
{
nop 0
nop 0
nop 0
}
paravirt_nop_bundle_end:
END(paravirt_nop_bundle)
__FINIT
__INITDATA_OR_MODULE
.align 8
.global paravirt_nop_bundle_size
paravirt_nop_bundle_size:
data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/paravirt.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -169,35 +168,16 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) ...@@ -169,35 +168,16 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
ia64_srlz_i(); ia64_srlz_i();
} }
extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
.fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
.fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};
unsigned long * __init
paravirt_get_fsyscall_table(void)
{
return pv_fsys_data.fsyscall_table;
}
char * __init
paravirt_get_fsys_bubble_down(void)
{
return pv_fsys_data.fsys_bubble_down;
}
static void __init static void __init
patch_fsyscall_table (unsigned long start, unsigned long end) patch_fsyscall_table (unsigned long start, unsigned long end)
{ {
u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); extern unsigned long fsyscall_table[NR_syscalls];
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
while (offp < (s32 *) end) { while (offp < (s32 *) end) {
ip = (u64) ia64_imva((char *) offp + *offp); ip = (u64) ia64_imva((char *) offp + *offp);
ia64_patch_imm64(ip, fsyscall_table); ia64_patch_imm64(ip, (u64) fsyscall_table);
ia64_fc((void *) ip); ia64_fc((void *) ip);
++offp; ++offp;
} }
...@@ -208,7 +188,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) ...@@ -208,7 +188,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
static void __init static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{ {
u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); extern char fsys_bubble_down[];
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
...@@ -226,13 +206,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) ...@@ -226,13 +206,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
void __init void __init
ia64_patch_gate (void) ia64_patch_gate (void)
{ {
# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) # define START(name) ((unsigned long) __start_gate_##name##_patchlist)
# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) # define END(name) ((unsigned long)__end_gate_##name##_patchlist)
patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); patch_fsyscall_table(START(fsyscall), END(fsyscall));
patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
ia64_patch_vtop(START(VTOP), END(VTOP)); ia64_patch_vtop(START(vtop), END(vtop));
ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
} }
void ia64_patch_phys_stack_reg(unsigned long val) void ia64_patch_phys_stack_reg(unsigned long val)
......
...@@ -50,8 +50,6 @@ ...@@ -50,8 +50,6 @@
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/meminit.h> #include <asm/meminit.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -360,8 +358,6 @@ reserve_memory (void) ...@@ -360,8 +358,6 @@ reserve_memory (void)
rsvd_region[n].end = (unsigned long) ia64_imva(_end); rsvd_region[n].end = (unsigned long) ia64_imva(_end);
n++; n++;
n += paravirt_reserve_memory(&rsvd_region[n]);
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) { if (ia64_boot_param->initrd_start) {
rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
...@@ -528,10 +524,7 @@ setup_arch (char **cmdline_p) ...@@ -528,10 +524,7 @@ setup_arch (char **cmdline_p)
{ {
unw_init(); unw_init();
paravirt_arch_setup_early();
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
paravirt_patch_apply();
*cmdline_p = __va(ia64_boot_param->command_line); *cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
...@@ -594,9 +587,6 @@ setup_arch (char **cmdline_p) ...@@ -594,9 +587,6 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */ cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */ mmu_context_init(); /* initialize context_id bitmap */
paravirt_banner();
paravirt_arch_setup_console(cmdline_p);
#ifdef CONFIG_VT #ifdef CONFIG_VT
if (!conswitchp) { if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE) # if defined(CONFIG_DUMMY_CONSOLE)
...@@ -616,8 +606,6 @@ setup_arch (char **cmdline_p) ...@@ -616,8 +606,6 @@ setup_arch (char **cmdline_p)
#endif #endif
/* enable IA-64 Machine Check Abort Handling unless disabled */ /* enable IA-64 Machine Check Abort Handling unless disabled */
if (paravirt_arch_setup_nomca())
nomca = 1;
if (!nomca) if (!nomca)
ia64_mca_init(); ia64_mca_init();
......
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -569,7 +568,6 @@ void smp_prepare_boot_cpu(void) ...@@ -569,7 +568,6 @@ void smp_prepare_boot_cpu(void)
cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
set_numa_node(cpu_to_node_map[smp_processor_id()]); set_numa_node(cpu_to_node_map[smp_processor_id()]);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
paravirt_post_smp_prepare_boot_cpu();
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/sal.h> #include <asm/sal.h>
#include <asm/sections.h> #include <asm/sections.h>
@@ -47,33 +46,12 @@ EXPORT_SYMBOL(last_cli_ip);
 #endif
 
-#ifdef CONFIG_PARAVIRT
-/* We need to define a real function for sched_clock, to override the
-   weak default version */
-unsigned long long sched_clock(void)
-{
-	return paravirt_sched_clock();
-}
-#endif
-
-#ifdef CONFIG_PARAVIRT
-static void
-paravirt_clocksource_resume(struct clocksource *cs)
-{
-	if (pv_time_ops.clocksource_resume)
-		pv_time_ops.clocksource_resume();
-}
-#endif
-
 static struct clocksource clocksource_itc = {
 	.name		= "itc",
 	.rating		= 350,
 	.read		= itc_get_cycles,
 	.mask		= CLOCKSOURCE_MASK(64),
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-#ifdef CONFIG_PARAVIRT
-	.resume		= paravirt_clocksource_resume,
-#endif
 };
 static struct clocksource *itc_clocksource;
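Two fallbacks make these deletions safe. For sched_clock(), the removed comment says it all: the paravirt version existed only to override the kernel's weak, jiffies-based default, so native ia64 simply keeps using that default. For the clocksource, the generic resume path only invokes the hook if one is set; paraphrasing the core logic in kernel/time/clocksource.c (a sketch, not a verbatim quote):

    void clocksource_resume(void)
    {
    	struct clocksource *cs;
    
    	list_for_each_entry(cs, &clocksource_list, list)
    		if (cs->resume)
    			cs->resume(cs);	/* itc now leaves .resume NULL */
    
    	clocksource_resume_watchdog();
    }

With no hypervisor clock to resynchronize against after suspend, the itc clocksource needs no resume callback at all.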
@@ -164,9 +142,6 @@ timer_interrupt (int irq, void *dev_id)
 	profile_tick(CPU_PROFILING);
 
-	if (paravirt_do_steal_accounting(&new_itm))
-		goto skip_process_time_accounting;
-
 	while (1) {
 		update_process_times(user_mode(get_irq_regs()));
@@ -187,8 +162,6 @@ timer_interrupt (int irq, void *dev_id)
 		local_irq_disable();
 	}
 
-skip_process_time_accounting:
-
 	do {
 		/*
 		 * If we're too close to the next clock tick for
@@ -337,8 +310,6 @@ void ia64_init_itm(void)
 		 */
 		clocksource_itc.rating = 50;
 
-	paravirt_init_missing_ticks_accounting(smp_processor_id());
-
 	/* avoid softlock up message when cpu is unplug and plugged again. */
 	touch_softlockup_watchdog();
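The three deletions above are pieces of one feature: ticks that elapsed while the vCPU was descheduled by the hypervisor were charged as stolen time instead of to the current task, with paravirt_init_missing_ticks_accounting() priming that bookkeeping per CPU. The removed predicate plausibly had this native shape (a hypothetical stub, inferred from the call site rather than quoted from the deleted code):

    /* Native stub: on bare metal no time can be stolen, so timer_interrupt()
     * always falls through to update_process_times(). */
    static int paravirt_do_steal_accounting(unsigned long *new_itm)
    {
    	return 0;	/* nonzero meant "ticks already accounted; skip" */
    }

Once the answer is always "no", the skip_process_time_accounting label has no remaining user and the straight-line code is correct as-is.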
...
@@ -136,27 +136,6 @@ SECTIONS {
 		__end___mckinley_e9_bundles = .;
 	}
 
-#if defined(CONFIG_PARAVIRT)
-	. = ALIGN(16);
-	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
-		__start_paravirt_bundles = .;
-		*(.paravirt_bundles)
-		__stop_paravirt_bundles = .;
-	}
-	. = ALIGN(16);
-	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
-		__start_paravirt_insts = .;
-		*(.paravirt_insts)
-		__stop_paravirt_insts = .;
-	}
-	. = ALIGN(16);
-	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
-		__start_paravirt_branches = .;
-		*(.paravirt_branches)
-		__stop_paravirt_branches = .;
-	}
-#endif
-
 #if defined(CONFIG_IA64_GENERIC)
 	/* Machine Vector */
 	. = ALIGN(16);
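Each deleted output section is an instance of the standard linker-table idiom: an aligned section bracketed by __start_*/__stop_* symbols so that boot code can walk whatever entries the object files contributed. A generic sketch of the consumer side (the entry layout is illustrative, not the deleted ia64 patch-site format):

    struct patch_site {			/* illustrative entry layout */
    	u64 addr;			/* instruction bundle to rewrite */
    	u32 type;			/* which privileged op it wraps */
    };
    
    extern struct patch_site __start_paravirt_bundles[];	/* defined by the */
    extern struct patch_site __stop_paravirt_bundles[];	/* linker script  */
    
    static void __init apply_paravirt_patches(void)
    {
    	struct patch_site *p;
    
    	for (p = __start_paravirt_bundles; p < __stop_paravirt_bundles; p++)
    		/* rewrite the bundle at p->addr for the detected hypervisor */;
    }

This is the table that the paravirt_patch_apply() call removed from setup_arch() used to walk; with no producers left, the sections would always be empty.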
...
@@ -34,7 +34,6 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
-#include <asm/paravirt.h>
 
 extern void ia64_tlb_init (void);
@@ -244,7 +243,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 static void __init
 setup_gate (void)
 {
-	void *gate_section;
 	struct page *page;
 
 	/*
@@ -252,11 +250,10 @@ setup_gate (void)
 	 * headers etc. and once execute-only page to enable
 	 * privilege-promotion via "epc":
 	 */
-	gate_section = paravirt_get_gate_section();
-	page = virt_to_page(ia64_imva(gate_section));
+	page = virt_to_page(ia64_imva(__start_gate_section));
 	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
 #ifdef HAVE_BUGGY_SEGREL
-	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
+	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
 	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
 #else
 	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
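The runtime lookup disappears here because only one gate image remains: paravirt_get_gate_section() existed so a hypervisor could substitute its own gate page, whereas on native hardware the gate section's address is the link-time constant __start_gate_section, so the gate_section local collapses into direct uses of that symbol.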
@@ -642,8 +639,8 @@ mem_init (void)
 	 * code can tell them apart.
 	 */
 	for (i = 0; i < NR_syscalls; ++i) {
+		extern unsigned long fsyscall_table[NR_syscalls];
 		extern unsigned long sys_call_table[NR_syscalls];
-		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
 
 		if (!fsyscall_table[i] || nolwsys)
 			fsyscall_table[i] = sys_call_table[i] | 1;
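The "| 1" trick relies on alignment: ia64 instruction bundles are 16-byte aligned, so bit 0 of any real entry point is zero and is free to serve as a "no light-weight handler" flag, which is how the fsys entry code "can tell them apart" per the context comment. A sketch of the consumer-side test (illustrative C; the real consumer is the fsys entry assembly):

    unsigned long entry = fsyscall_table[i];	/* i, table from the loop above */
    
    if (entry & 1) {
    	/* slow path: full kernel entry via the sys_call_table handler
    	 * recovered as (entry & ~1UL) */
    } else {
    	/* fast path: branch straight to the light-weight handler */
    }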
...
#
# Checker for paravirtualizations of privileged operations.
#
s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not used directly\"/g
s/mov.*=.*cr\.itir/.warning \"cr.itir should not used directly\"/g
s/mov.*=.*cr\.isr/.warning \"cr.isr should not used directly\"/g
s/mov.*=.*cr\.iha/.warning \"cr.iha should not used directly\"/g
s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not used directly\"/g
s/mov.*=.*cr\.iim/.warning \"cr.iim should not used directly\"/g
s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g
s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g
s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr
s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g
s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g
s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g
s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g
s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not used directly\"/g
s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not used directly\"/g
s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not used directly\"/g
s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not used directly\"/g
s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not used directly\"/g
s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g
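The block above is the checker script this commit deletes in full (most likely arch/ia64/scripts/pvcheck.sed, judging by its header comment). It was run over generated assembly so that any raw use of a privileged operation, e.g. "ssm psr.ic", was rewritten into an assembler directive such as .warning "ssm psr.ic should not be used directly", making unparavirtualized code fail loudly at build time; note the deliberate "[^\.]*psr" pattern, which matches bare psr moves while sparing ar.fpsr. With the paravirt layer gone, direct use of those instructions is legitimate again and the checker has no remaining purpose.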