Commit f026399f authored by Marcelo Tosatti

Merge branch 'for-queue' of https://github.com/agraf/linux-2.6 into queue

* 'for-queue' of https://github.com/agraf/linux-2.6:
  PPC: ePAPR: Convert hcall header to uapi (round 2)
  KVM: PPC: Book3S HV: Fix thinko in try_lock_hpte()
  KVM: PPC: Book3S HV: Allow DTL to be set to address 0, length 0
  KVM: PPC: Book3S HV: Fix accounting of stolen time
  KVM: PPC: Book3S HV: Run virtual core whenever any vcpus in it can run
  KVM: PPC: Book3S HV: Fixes for late-joining threads
  KVM: PPC: Book3s HV: Don't access runnable threads list without vcore lock
  KVM: PPC: Book3S HV: Fix some races in starting secondary threads
  KVM: PPC: Book3S HV: Allow KVM guests to stop secondary threads coming online
  PPC: ePAPR: Convert header to uapi
  KVM: PPC: Move mtspr/mfspr emulation into own functions
  KVM: Documentation: Fix reentry-to-be-consistent paragraph
  KVM: PPC: 44x: fix DCR read/write
......@@ -2183,7 +2183,8 @@ executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO and KVM_EXIT_OSI, the corresponding
NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR
and KVM_EXIT_PAPR the corresponding
operations are complete (and guest state is consistent) only after userspace
has re-entered the kernel with KVM_RUN. The kernel side will first finish
incomplete operations and then check for pending signals. Userspace
......
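To make the re-entry requirement above concrete, here is a minimal userspace sketch (assumptions: a vCPU fd and its mmap'd struct kvm_run are already set up; device_read()/device_write() are hypothetical device-model helpers, not KVM API) showing that an MMIO access only completes once userspace loops back into KVM_RUN:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Hypothetical device-model helpers. */
extern void device_read(__u64 addr, __u8 *data, __u32 len);
extern void device_write(__u64 addr, __u8 *data, __u32 len);

/* vcpu_fd is an open vCPU descriptor; run points at its mmap'd kvm_run. */
static void vcpu_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			perror("KVM_RUN");
			return;
		}
		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (run->mmio.is_write) {
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			} else {
				/* For a load, fill in run->mmio.data; per the
				 * NOTE above, the guest only observes it after
				 * we re-enter the kernel with KVM_RUN. */
				device_read(run->mmio.phys_addr,
					    run->mmio.data, run->mmio.len);
			}
			break;	/* loop back to KVM_RUN to finish the op */
		default:
			fprintf(stderr, "unhandled exit %u\n",
				run->exit_reason);
			return;
		}
	}
}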
/*
* ePAPR hcall interface
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Author: Timur Tabi <timur@freescale.com>
*
* This file is provided under a dual BSD/GPL license. When using or
* redistributing this file, you may do so under either license.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* A "hypercall" is an "sc 1" instruction. This header file file provides C
* wrapper functions for the ePAPR hypervisor interface. It is inteded
* for use by Linux device drivers and other operating systems.
*
* The hypercalls are implemented as inline assembly, rather than assembly
* language functions in a .S file, for optimization. It allows
* the caller to issue the hypercall instruction directly, improving both
* performance and memory footprint.
*/
#ifndef _EPAPR_HCALLS_H
#define _EPAPR_HCALLS_H
#include <uapi/asm/epapr_hcalls.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
/*
* Hypercall register clobber list
*
* These macros are used to define the list of clobbered registers during a
* hypercall. Technically, registers r0 and r3-r12 are always clobbered,
* but the gcc inline assembly syntax does not allow us to specify registers
* on the clobber list that are also on the input/output list. Therefore,
* the list of clobbered registers depends on the number of register
* parameters ("+r" and "=r") passed to the hypercall.
*
* Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
* general rule, 'x' is the number of parameters passed to the assembly
* block *except* for r11.
*
* If you're not sure, just use the smallest value of 'x' that does not
* generate a compilation error. Because these are static inline functions,
* the compiler will only check the clobber list for a function if you
* compile code that calls that function.
*
* r3 and r11 are not included in any clobber list because they are always
* listed as output registers.
*
* XER, CTR, and LR are currently listed as clobbers because it's uncertain
* whether they will be clobbered.
*
* Note that r11 can be used as an output parameter.
*
* The "memory" clobber is only necessary for hcalls where the Hypervisor
* will read or write guest memory. However, we add it to all hcalls because
* the impact is minimal, and we want to ensure that it's present for the
* hcalls that need it.
*/
/* List of common clobbered registers. Do not use this macro. */
#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
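As a worked example of the rule above: ev_int_set_config() below passes five registers (r11 and r3-r6) to its asm block; excluding r11 leaves four parameters, so it uses EV_HCALL_CLOBBERS4, which adds r7-r10 to the common clobber list.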
extern bool epapr_paravirt_enabled;
extern u32 epapr_hypercall_start[];
/*
* We use "uintptr_t" to define a register because it's guaranteed to be a
* 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
* platform.
*
* All registers are either input/output or output only. Registers that are
* initialized before making the hypercall are input/output. All
* input/output registers are represented with "+r". Output-only registers
* are represented with "=r". Do not specify any unused registers. The
* clobber list will tell the compiler that the hypercall modifies those
* registers, which is good enough.
*/
/**
* ev_int_set_config - configure the specified interrupt
* @interrupt: the interrupt number
* @config: configuration for this interrupt
* @priority: interrupt priority
* @destination: destination CPU number
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_set_config(unsigned int interrupt,
uint32_t config, unsigned int priority, uint32_t destination)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
r3 = interrupt;
r4 = config;
r5 = priority;
r6 = destination;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
: : EV_HCALL_CLOBBERS4
);
return r3;
}
/**
* ev_int_get_config - return the config of the specified interrupt
* @interrupt: the interrupt number
* @config: returned configuration for this interrupt
* @priority: returned interrupt priority
* @destination: returned destination CPU number
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_get_config(unsigned int interrupt,
uint32_t *config, unsigned int *priority, uint32_t *destination)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
: : EV_HCALL_CLOBBERS4
);
*config = r4;
*priority = r5;
*destination = r6;
return r3;
}
/**
* ev_int_set_mask - sets the mask for the specified interrupt source
* @interrupt: the interrupt number
* @mask: 0=enable interrupts, 1=disable interrupts
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_set_mask(unsigned int interrupt,
unsigned int mask)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
r3 = interrupt;
r4 = mask;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
return r3;
}
/**
* ev_int_get_mask - returns the mask for the specified interrupt source
* @interrupt: the interrupt number
* @mask: returned mask for this interrupt (0=enabled, 1=disabled)
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_get_mask(unsigned int interrupt,
unsigned int *mask)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
*mask = r4;
return r3;
}
/**
* ev_int_eoi - signal the end of interrupt processing
* @interrupt: the interrupt number
*
* This function signals the end of processing for the specified
* interrupt, which must be the interrupt currently in service. By
* definition, this is also the highest-priority interrupt.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_eoi(unsigned int interrupt)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_INT_EOI);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
/**
* ev_byte_channel_send - send characters to a byte channel
* @handle: byte channel handle
* @count: (input) num of chars to send, (output) num chars sent
* @buffer: pointer to a 16-byte buffer
*
* @buffer must be at least 16 bytes long, because all 16 bytes will be
* read from memory into registers, even if count < 16.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_send(unsigned int handle,
unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
register uintptr_t r7 __asm__("r7");
register uintptr_t r8 __asm__("r8");
const uint32_t *p = (const uint32_t *) buffer;
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
r3 = handle;
r4 = *count;
r5 = be32_to_cpu(p[0]);
r6 = be32_to_cpu(p[1]);
r7 = be32_to_cpu(p[2]);
r8 = be32_to_cpu(p[3]);
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3),
"+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
: : EV_HCALL_CLOBBERS6
);
*count = r4;
return r3;
}
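As a usage illustration of the 16-byte rule above, a caller can pad short messages into a full-size buffer. The wrapper below is a sketch, not part of this header: byte_channel_puts is a hypothetical name, and memcpy is assumed to be available via <linux/string.h>.

/* Hypothetical helper: pad a short message so ev_byte_channel_send() can
 * safely read all EV_BYTE_CHANNEL_MAX_BYTES from the buffer. */
static inline unsigned int byte_channel_puts(unsigned int handle,
	const char *s, unsigned int len)
{
	char buf[EV_BYTE_CHANNEL_MAX_BYTES] = { 0 };
	unsigned int count = len;
	unsigned int ret;

	if (len > EV_BYTE_CHANNEL_MAX_BYTES)
		return EV_BUFFER_OVERFLOW;	/* caller should split the message */
	memcpy(buf, s, len);		/* all 16 bytes of buf are now readable */
	ret = ev_byte_channel_send(handle, &count, buf);
	/* on success, count holds the number of characters actually sent */
	return ret;
}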
/**
* ev_byte_channel_receive - fetch characters from a byte channel
* @handle: byte channel handle
* @count: (input) max num of chars to receive, (output) num chars received
* @buffer: pointer to a 16-byte buffer
*
* The size of @buffer must be at least 16 bytes, even if you request fewer
* than 16 characters, because we always write 16 bytes to @buffer. This is
* for performance reasons.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_receive(unsigned int handle,
unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
register uintptr_t r7 __asm__("r7");
register uintptr_t r8 __asm__("r8");
uint32_t *p = (uint32_t *) buffer;
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
r3 = handle;
r4 = *count;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4),
"=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
: : EV_HCALL_CLOBBERS6
);
*count = r4;
p[0] = cpu_to_be32(r5);
p[1] = cpu_to_be32(r6);
p[2] = cpu_to_be32(r7);
p[3] = cpu_to_be32(r8);
return r3;
}
/**
* ev_byte_channel_poll - returns the status of the byte channel buffers
* @handle: byte channel handle
* @rx_count: returned count of bytes in receive queue
* @tx_count: returned count of free space in transmit queue
*
* This function reports the amount of data in the receive queue (i.e. the
* number of bytes you can read), and the amount of free space in the transmit
* queue (i.e. the number of bytes you can write).
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_poll(unsigned int handle,
unsigned int *rx_count, unsigned int *tx_count)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
: : EV_HCALL_CLOBBERS3
);
*rx_count = r4;
*tx_count = r5;
return r3;
}
/**
* ev_int_iack - acknowledge an interrupt
* @handle: handle to the target interrupt controller
* @vector: returned interrupt vector
*
* If @handle is zero, the function returns the next interrupt source
* number to be handled, irrespective of the hierarchy or cascading
* of interrupt controllers. If non-zero, @handle specifies the
* interrupt controller that is the target of the acknowledge.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_iack(unsigned int handle,
unsigned int *vector)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_IACK);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
*vector = r4;
return r3;
}
/**
* ev_doorbell_send - send a doorbell to another partition
* @handle: doorbell send handle
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_doorbell_send(unsigned int handle)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
/**
* ev_idle - wait for the next interrupt on this core
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_idle(void)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_IDLE);
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "=r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
#endif /* !__ASSEMBLY__ */
#endif /* _EPAPR_HCALLS_H */
......@@ -118,6 +118,7 @@
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
#define RESUME_FLAG_ARCH1 (1<<2)
#define RESUME_GUEST 0
#define RESUME_GUEST_NV RESUME_FLAG_NV
......
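A brief reading of the RESUME values above (a sketch based only on the definitions shown; my_exit_handler and the <linux/kvm_host.h> context are hypothetical): exit handlers return RESUME_GUEST to re-enter the guest, and OR in RESUME_FLAG_NV when guest non-volatile registers must be reloaded.

/* Hypothetical exit handler illustrating the flag convention. */
static int my_exit_handler(struct kvm_vcpu *vcpu)
{
	/* ... emulation that modified guest non-volatile registers ... */
	return RESUME_GUEST_NV;		/* RESUME_GUEST | RESUME_FLAG_NV */
}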
......@@ -60,7 +60,7 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
" ori %0,%0,%4\n"
" stdcx. %0,0,%2\n"
" beq+ 2f\n"
" li %1,%3\n"
" mr %1,%3\n"
"2: isync"
: "=&r" (tmp), "=&r" (old)
: "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
......
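The one-instruction change above is the "thinko" named in the merge log: %3 is bound to the register constraint "r" (bits), but li takes an immediate operand, so gcc would substitute the register *number* as a literal value instead of copying the register's contents; mr performs the intended register-to-register move. A standalone illustration of the pitfall (sketch, not from this patch):

static inline unsigned long copy_reg(unsigned long in)
{
	unsigned long out;

	/* WRONG: with "r"(in), %1 expands to a register number, and li
	 * would treat that number as an immediate value:
	 *	asm("li %0,%1" : "=r"(out) : "r"(in));
	 */

	/* RIGHT: mr copies the register named by %1 into %0. */
	asm("mr %0,%1" : "=r"(out) : "r"(in));
	return out;
}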
......@@ -289,9 +289,10 @@ struct kvmppc_vcore {
/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_RUNNING 1
#define VCORE_EXITING 2
#define VCORE_SLEEPING 3
#define VCORE_SLEEPING 1
#define VCORE_STARTING 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4
/*
* Struct used to manage memory for a virtual processor area
......@@ -558,13 +559,17 @@ struct kvm_vcpu_arch {
unsigned long dtl_index;
u64 stolen_logged;
struct kvmppc_vpa slb_shadow;
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
#endif
};
/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_STOPPED 0
#define KVMPPC_VCPU_BUSY_IN_HOST 1
#define KVMPPC_VCPU_RUNNABLE 2
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_BUSY_IN_HOST 2
/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
......
......@@ -67,6 +67,14 @@ void generic_mach_cpu_die(void);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
extern void inhibit_secondary_onlining(void);
extern void uninhibit_secondary_onlining(void);
#else /* HOTPLUG_CPU */
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
#endif
#ifdef CONFIG_PPC64
......
......@@ -7,6 +7,7 @@ header-y += bootx.h
header-y += byteorder.h
header-y += cputable.h
header-y += elf.h
header-y += epapr_hcalls.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
......@@ -42,4 +43,3 @@ header-y += termios.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
header-y += epapr_hcalls.h
......@@ -37,18 +37,8 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* A "hypercall" is an "sc 1" instruction. This header file file provides C
* wrapper functions for the ePAPR hypervisor interface. It is inteded
* for use by Linux device drivers and other operating systems.
*
* The hypercalls are implemented as inline assembly, rather than assembly
* language functions in a .S file, for optimization. It allows
* the caller to issue the hypercall instruction directly, improving both
* performance and memory footprint.
*/
#ifndef _EPAPR_HCALLS_H
#define _EPAPR_HCALLS_H
#ifndef _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
#define _UAPI_ASM_POWERPC_EPAPR_HCALLS_H
#define EV_BYTE_CHANNEL_SEND 1
#define EV_BYTE_CHANNEL_RECEIVE 2
......@@ -105,407 +95,4 @@
#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
/*
* Hypercall register clobber list
*
* These macros are used to define the list of clobbered registers during a
* hypercall. Technically, registers r0 and r3-r12 are always clobbered,
* but the gcc inline assembly syntax does not allow us to specify registers
* on the clobber list that are also on the input/output list. Therefore,
* the list of clobbered registers depends on the number of register
* parameters ("+r" and "=r") passed to the hypercall.
*
* Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
* general rule, 'x' is the number of parameters passed to the assembly
* block *except* for r11.
*
* If you're not sure, just use the smallest value of 'x' that does not
* generate a compilation error. Because these are static inline functions,
* the compiler will only check the clobber list for a function if you
* compile code that calls that function.
*
* r3 and r11 are not included in any clobber list because they are always
* listed as output registers.
*
* XER, CTR, and LR are currently listed as clobbers because it's uncertain
* whether they will be clobbered.
*
* Note that r11 can be used as an output parameter.
*
* The "memory" clobber is only necessary for hcalls where the Hypervisor
* will read or write guest memory. However, we add it to all hcalls because
* the impact is minimal, and we want to ensure that it's present for the
* hcalls that need it.
*/
/* List of common clobbered registers. Do not use this macro. */
#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
extern bool epapr_paravirt_enabled;
extern u32 epapr_hypercall_start[];
/*
* We use "uintptr_t" to define a register because it's guaranteed to be a
* 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
* platform.
*
* All registers are either input/output or output only. Registers that are
* initialized before making the hypercall are input/output. All
* input/output registers are represented with "+r". Output-only registers
* are represented with "=r". Do not specify any unused registers. The
* clobber list will tell the compiler that the hypercall modifies those
* registers, which is good enough.
*/
/**
* ev_int_set_config - configure the specified interrupt
* @interrupt: the interrupt number
* @config: configuration for this interrupt
* @priority: interrupt priority
* @destination: destination CPU number
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_set_config(unsigned int interrupt,
uint32_t config, unsigned int priority, uint32_t destination)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
r3 = interrupt;
r4 = config;
r5 = priority;
r6 = destination;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
: : EV_HCALL_CLOBBERS4
);
return r3;
}
/**
* ev_int_get_config - return the config of the specified interrupt
* @interrupt: the interrupt number
* @config: returned configuration for this interrupt
* @priority: returned interrupt priority
* @destination: returned destination CPU number
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_get_config(unsigned int interrupt,
uint32_t *config, unsigned int *priority, uint32_t *destination)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
: : EV_HCALL_CLOBBERS4
);
*config = r4;
*priority = r5;
*destination = r6;
return r3;
}
/**
* ev_int_set_mask - sets the mask for the specified interrupt source
* @interrupt: the interrupt number
* @mask: 0=enable interrupts, 1=disable interrupts
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_set_mask(unsigned int interrupt,
unsigned int mask)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
r3 = interrupt;
r4 = mask;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4)
: : EV_HCALL_CLOBBERS2
);
return r3;
}
/**
* ev_int_get_mask - returns the mask for the specified interrupt source
* @interrupt: the interrupt number
* @mask: returned mask for this interrupt (0=enabled, 1=disabled)
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_get_mask(unsigned int interrupt,
unsigned int *mask)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
*mask = r4;
return r3;
}
/**
* ev_int_eoi - signal the end of interrupt processing
* @interrupt: the interrupt number
*
* This function signals the end of processing for the specified
* interrupt, which must be the interrupt currently in service. By
* definition, this is also the highest-priority interrupt.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_eoi(unsigned int interrupt)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_INT_EOI);
r3 = interrupt;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
/**
* ev_byte_channel_send - send characters to a byte channel
* @handle: byte channel handle
* @count: (input) num of chars to send, (output) num chars sent
* @buffer: pointer to a 16-byte buffer
*
* @buffer must be at least 16 bytes long, because all 16 bytes will be
* read from memory into registers, even if count < 16.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_send(unsigned int handle,
unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
register uintptr_t r7 __asm__("r7");
register uintptr_t r8 __asm__("r8");
const uint32_t *p = (const uint32_t *) buffer;
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
r3 = handle;
r4 = *count;
r5 = be32_to_cpu(p[0]);
r6 = be32_to_cpu(p[1]);
r7 = be32_to_cpu(p[2]);
r8 = be32_to_cpu(p[3]);
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3),
"+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
: : EV_HCALL_CLOBBERS6
);
*count = r4;
return r3;
}
/**
* ev_byte_channel_receive - fetch characters from a byte channel
* @handle: byte channel handle
* @count: (input) max num of chars to receive, (output) num chars received
* @buffer: pointer to a 16-byte buffer
*
* The size of @buffer must be at least 16 bytes, even if you request fewer
* than 16 characters, because we always write 16 bytes to @buffer. This is
* for performance reasons.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_receive(unsigned int handle,
unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
register uintptr_t r6 __asm__("r6");
register uintptr_t r7 __asm__("r7");
register uintptr_t r8 __asm__("r8");
uint32_t *p = (uint32_t *) buffer;
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
r3 = handle;
r4 = *count;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "+r" (r4),
"=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
: : EV_HCALL_CLOBBERS6
);
*count = r4;
p[0] = cpu_to_be32(r5);
p[1] = cpu_to_be32(r6);
p[2] = cpu_to_be32(r7);
p[3] = cpu_to_be32(r8);
return r3;
}
/**
* ev_byte_channel_poll - returns the status of the byte channel buffers
* @handle: byte channel handle
* @rx_count: returned count of bytes in receive queue
* @tx_count: returned count of free space in transmit queue
*
* This function reports the amount of data in the receive queue (i.e. the
* number of bytes you can read), and the amount of free space in the transmit
* queue (i.e. the number of bytes you can write).
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_byte_channel_poll(unsigned int handle,
unsigned int *rx_count, unsigned int *tx_count)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
register uintptr_t r5 __asm__("r5");
r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
: : EV_HCALL_CLOBBERS3
);
*rx_count = r4;
*tx_count = r5;
return r3;
}
/**
* ev_int_iack - acknowledge an interrupt
* @handle: handle to the target interrupt controller
* @vector: returned interrupt vector
*
* If @handle is zero, the function returns the next interrupt source
* number to be handled, irrespective of the hierarchy or cascading
* of interrupt controllers. If non-zero, @handle specifies the
* interrupt controller that is the target of the acknowledge.
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_int_iack(unsigned int handle,
unsigned int *vector)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
register uintptr_t r4 __asm__("r4");
r11 = EV_HCALL_TOKEN(EV_INT_IACK);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3), "=r" (r4)
: : EV_HCALL_CLOBBERS2
);
*vector = r4;
return r3;
}
/**
* ev_doorbell_send - send a doorbell to another partition
* @handle: doorbell send handle
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_doorbell_send(unsigned int handle)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
r3 = handle;
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "+r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
/**
* ev_idle - wait for the next interrupt on this core
*
* Returns 0 for success, or an error code.
*/
static inline unsigned int ev_idle(void)
{
register uintptr_t r11 __asm__("r11");
register uintptr_t r3 __asm__("r3");
r11 = EV_HCALL_TOKEN(EV_IDLE);
asm volatile("bl epapr_hypercall_start"
: "+r" (r11), "=r" (r3)
: : EV_HCALL_CLOBBERS1
);
return r3;
}
#endif /* !__ASSEMBLY__ */
#endif
#endif /* _UAPI_ASM_POWERPC_EPAPR_HCALLS_H */
......@@ -427,6 +427,45 @@ int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
static atomic_t secondary_inhibit_count;
/*
* Don't allow secondary CPU threads to come online
*/
void inhibit_secondary_onlining(void)
{
/*
* This makes secondary_inhibit_count stable during cpu
* online/offline operations.
*/
get_online_cpus();
atomic_inc(&secondary_inhibit_count);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);
/*
* Allow secondary CPU threads to come online again
*/
void uninhibit_secondary_onlining(void)
{
get_online_cpus();
atomic_dec(&secondary_inhibit_count);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
static int secondaries_inhibited(void)
{
return atomic_read(&secondary_inhibit_count);
}
#else /* HOTPLUG_CPU */
#define secondaries_inhibited() 0
#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
......@@ -445,6 +484,13 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
/*
* Don't allow secondary threads to come online if inhibited
*/
if (threads_per_core > 1 && secondaries_inhibited() &&
cpu % threads_per_core != 0)
return -EBUSY;
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
......
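A minimal usage sketch of the new pair (function names come from the hunks above; my_subsys_* are hypothetical, and in this series the real caller is the Book3S HV code around guest creation and teardown). While inhibited, __cpu_up() above rejects any non-primary thread with -EBUSY:

/* Hypothetical subsystem that needs whole cores to itself. */
static int my_subsys_start(void)
{
	inhibit_secondary_onlining();	/* secondary threads can no longer come online */
	/* ... run work that assumes sibling threads stay offline ... */
	return 0;
}

static void my_subsys_stop(void)
{
	uninhibit_secondary_onlining();	/* onlining is allowed again */
}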
......@@ -46,6 +46,7 @@ static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
vcpu->run->dcr.dcrn = dcrn;
vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
vcpu->run->dcr.is_write = 1;
vcpu->arch.dcr_is_write = 1;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
return EMULATE_DO_DCR;
......@@ -80,6 +81,7 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
vcpu->run->dcr.dcrn = dcrn;
vcpu->run->dcr.data = 0;
vcpu->run->dcr.is_write = 0;
vcpu->arch.dcr_is_write = 0;
vcpu->arch.io_gpr = rt;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
......
...... (the diff for this file is collapsed)
......@@ -134,8 +134,11 @@ kvm_start_guest:
27: /* XXX should handle hypervisor maintenance interrupts etc. here */
/* reload vcpu pointer after clearing the IPI */
ld r4,HSTATE_KVM_VCPU(r13)
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq cr1,kvm_no_guest
beq kvm_no_guest
/* were we napping due to cede? */
lbz r0,HSTATE_NAPPING(r13)
......@@ -1587,6 +1590,10 @@ secondary_too_late:
.endr
secondary_nap:
/* Clear our vcpu pointer so we don't come back in early */
li r0, 0
std r0, HSTATE_KVM_VCPU(r13)
lwsync
/* Clear any pending IPI - assume we're a secondary thread */
ld r5, HSTATE_XICS_PHYS(r13)
li r7, XICS_XIRR
......@@ -1612,8 +1619,6 @@ secondary_nap:
kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
li r0, 0
std r0, HSTATE_KVM_VCPU(r13)
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
......
......@@ -131,6 +131,125 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
return vcpu->arch.dec - jd;
}
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
enum emulation_result emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
case SPRN_TBWL: break;
case SPRN_TBWU: break;
case SPRN_MSSSR0: break;
case SPRN_DEC:
vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
vcpu->arch.shared->sprg3 = spr_val;
break;
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
return emulated;
}
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
enum emulation_result emulated = EMULATE_DONE;
ulong spr_val = 0;
switch (sprn) {
case SPRN_SRR0:
spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
spr_val = vcpu->arch.pvr;
break;
case SPRN_PIR:
spr_val = vcpu->vcpu_id;
break;
case SPRN_MSSSR0:
spr_val = 0;
break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyway.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
spr_val = get_tb() >> 32;
break;
case SPRN_TBWU:
spr_val = get_tb();
break;
case SPRN_SPRG0:
spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
&spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);
}
break;
}
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
return emulated;
}
/* XXX to do:
* lhax
* lhaux
......@@ -156,7 +275,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
ulong spr_val = 0;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
......@@ -236,62 +354,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MFSPR:
switch (sprn) {
case SPRN_SRR0:
spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
spr_val = vcpu->arch.pvr;
break;
case SPRN_PIR:
spr_val = vcpu->vcpu_id;
break;
case SPRN_MSSSR0:
spr_val = 0;
break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyway.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
spr_val = get_tb() >> 32;
break;
case SPRN_TBWU:
spr_val = get_tb();
break;
case SPRN_SPRG0:
spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
&spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);
}
break;
}
kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
break;
case OP_31_XOP_STHX:
......@@ -308,49 +371,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MTSPR:
spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
case SPRN_TBWL: break;
case SPRN_TBWU: break;
case SPRN_MSSSR0: break;
case SPRN_DEC:
vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
vcpu->arch.shared->sprg3 = spr_val;
break;
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
break;
case OP_31_XOP_DCBI:
......