Commit 62955e10 authored by Peter Maydell

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

Miscellaneous bugfixes

# gpg: Signature made Wed 15 Nov 2017 15:27:25 GMT
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  fix scripts/update-linux-headers.sh here document
  exec: Do not resolve subpage in mru_section
  util/stats64: Fix min/max comparisons
  cpu-exec: avoid cpu_exec_nocache infinite loop with record/replay
  cpu-exec: don't overwrite exception_index
  vhost-user-scsi: add missing virtqueue_size param
  target-i386: adds PV_TLB_FLUSH CPUID feature bit
  thread-posix: fix qemu_rec_mutex_trylock macro
  Makefile: simpler/faster "make help"
  ioapic/tracing: Remove last DPRINTFs
  Enable 8-byte wide MMIO for 16550 serial devices
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
...@@ -6,7 +6,7 @@ BUILD_DIR=$(CURDIR) ...@@ -6,7 +6,7 @@ BUILD_DIR=$(CURDIR)
# Before including a proper config-host.mak, assume we are in the source tree # Before including a proper config-host.mak, assume we are in the source tree
SRC_PATH=. SRC_PATH=.
UNCHECKED_GOALS := %clean TAGS cscope ctags docker docker-% UNCHECKED_GOALS := %clean TAGS cscope ctags docker docker-% help
# All following code might depend on configuration variables # All following code might depend on configuration variables
ifneq ($(wildcard config-host.mak),) ifneq ($(wildcard config-host.mak),)
......
...@@ -470,7 +470,19 @@ static inline void cpu_handle_debug_exception(CPUState *cpu) ...@@ -470,7 +470,19 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
static inline bool cpu_handle_exception(CPUState *cpu, int *ret) static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{ {
if (cpu->exception_index >= 0) { if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
}
#endif
if (cpu->exception_index < 0) {
return false;
}
}
if (cpu->exception_index >= EXCP_INTERRUPT) { if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */ /* exit request from the cpu execution loop */
*ret = cpu->exception_index; *ret = cpu->exception_index;
...@@ -503,15 +515,6 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) ...@@ -503,15 +515,6 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
*ret = EXCP_INTERRUPT; *ret = EXCP_INTERRUPT;
return true; return true;
} }
#endif
}
#ifndef CONFIG_USER_ONLY
} else if (replay_has_exception()
&& cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
/* try to cause an exception pending in the log */
cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
*ret = -1;
return true;
#endif #endif
} }
...@@ -522,6 +525,19 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -522,6 +525,19 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb) TranslationBlock **last_tb)
{ {
CPUClass *cc = CPU_GET_CLASS(cpu); CPUClass *cc = CPU_GET_CLASS(cpu);
int32_t insns_left;
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
*/
insns_left = atomic_read(&cpu->icount_decr.u32);
atomic_set(&cpu->icount_decr.u16.high, 0);
if (unlikely(insns_left < 0)) {
/* Ensure the zeroing of icount_decr comes before the next read
* of cpu->exit_request or cpu->interrupt_request.
*/
smp_mb();
}
if (unlikely(atomic_read(&cpu->interrupt_request))) { if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request; int interrupt_request;
...@@ -594,7 +610,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, ...@@ -594,7 +610,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (unlikely(atomic_read(&cpu->exit_request) if (unlikely(atomic_read(&cpu->exit_request)
|| (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) { || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
atomic_set(&cpu->exit_request, 0); atomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {
cpu->exception_index = EXCP_INTERRUPT; cpu->exception_index = EXCP_INTERRUPT;
}
return true; return true;
} }
...@@ -618,17 +636,14 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, ...@@ -618,17 +636,14 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
*last_tb = NULL; *last_tb = NULL;
insns_left = atomic_read(&cpu->icount_decr.u32); insns_left = atomic_read(&cpu->icount_decr.u32);
atomic_set(&cpu->icount_decr.u16.high, 0);
if (insns_left < 0) { if (insns_left < 0) {
/* Something asked us to stop executing chained TBs; just /* Something asked us to stop executing chained TBs; just
* continue round the main loop. Whatever requested the exit * continue round the main loop. Whatever requested the exit
* will also have set something else (eg exit_request or * will also have set something else (eg exit_request or
* interrupt_request) which we will handle next time around * interrupt_request) which will be handled by
* the loop. But we need to ensure the zeroing of icount_decr * cpu_handle_interrupt. cpu_handle_interrupt will also
* comes before the next read of cpu->exit_request * clear cpu->icount_decr.u16.high.
* or cpu->interrupt_request.
*/ */
smp_mb();
return; return;
} }
......
...@@ -410,22 +410,16 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, ...@@ -410,22 +410,16 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
{ {
MemoryRegionSection *section = atomic_read(&d->mru_section); MemoryRegionSection *section = atomic_read(&d->mru_section);
subpage_t *subpage; subpage_t *subpage;
bool update;
if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] && if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
section_covers_addr(section, addr)) { !section_covers_addr(section, addr)) {
update = false;
} else {
section = phys_page_find(d, addr); section = phys_page_find(d, addr);
update = true; atomic_set(&d->mru_section, section);
} }
if (resolve_subpage && section->mr->subpage) { if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem); subpage = container_of(section->mr, subpage_t, iomem);
section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
} }
if (update) {
atomic_set(&d->mru_section, section);
}
return section; return section;
} }
......
...@@ -1005,7 +1005,7 @@ static void serial_mm_write(void *opaque, hwaddr addr, ...@@ -1005,7 +1005,7 @@ static void serial_mm_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size) uint64_t value, unsigned size)
{ {
SerialState *s = opaque; SerialState *s = opaque;
value &= ~0u >> (32 - (size * 8)); value &= 255;
serial_ioport_write(s, addr >> s->it_shift, value, 1); serial_ioport_write(s, addr >> s->it_shift, value, 1);
} }
...@@ -1014,16 +1014,22 @@ static const MemoryRegionOps serial_mm_ops[3] = { ...@@ -1014,16 +1014,22 @@ static const MemoryRegionOps serial_mm_ops[3] = {
.read = serial_mm_read, .read = serial_mm_read,
.write = serial_mm_write, .write = serial_mm_write,
.endianness = DEVICE_NATIVE_ENDIAN, .endianness = DEVICE_NATIVE_ENDIAN,
.valid.max_access_size = 8,
.impl.max_access_size = 8,
}, },
[DEVICE_LITTLE_ENDIAN] = { [DEVICE_LITTLE_ENDIAN] = {
.read = serial_mm_read, .read = serial_mm_read,
.write = serial_mm_write, .write = serial_mm_write,
.endianness = DEVICE_LITTLE_ENDIAN, .endianness = DEVICE_LITTLE_ENDIAN,
.valid.max_access_size = 8,
.impl.max_access_size = 8,
}, },
[DEVICE_BIG_ENDIAN] = { [DEVICE_BIG_ENDIAN] = {
.read = serial_mm_read, .read = serial_mm_read,
.write = serial_mm_write, .write = serial_mm_write,
.endianness = DEVICE_BIG_ENDIAN, .endianness = DEVICE_BIG_ENDIAN,
.valid.max_access_size = 8,
.impl.max_access_size = 8,
}, },
}; };
......
...@@ -35,15 +35,6 @@ ...@@ -35,15 +35,6 @@
#include "hw/i386/x86-iommu.h" #include "hw/i386/x86-iommu.h"
#include "trace.h" #include "trace.h"
//#define DEBUG_IOAPIC
#ifdef DEBUG_IOAPIC
#define DPRINTF(fmt, ...) \
do { printf("ioapic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...)
#endif
#define APIC_DELIVERY_MODE_SHIFT 8 #define APIC_DELIVERY_MODE_SHIFT 8
#define APIC_POLARITY_SHIFT 14 #define APIC_POLARITY_SHIFT 14
#define APIC_TRIG_MODE_SHIFT 15 #define APIC_TRIG_MODE_SHIFT 15
...@@ -157,7 +148,7 @@ static void ioapic_set_irq(void *opaque, int vector, int level) ...@@ -157,7 +148,7 @@ static void ioapic_set_irq(void *opaque, int vector, int level)
* to GSI 2. GSI maps to ioapic 1-1. This is not * to GSI 2. GSI maps to ioapic 1-1. This is not
* the cleanest way of doing it but it should work. */ * the cleanest way of doing it but it should work. */
DPRINTF("%s: %s vec %x\n", __func__, level ? "raise" : "lower", vector); trace_ioapic_set_irq(vector, level);
if (vector == 0) { if (vector == 0) {
vector = 2; vector = 2;
} }
...@@ -290,11 +281,10 @@ ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size) ...@@ -290,11 +281,10 @@ ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size)
} }
} }
} }
DPRINTF("read: %08x = %08x\n", s->ioregsel, val);
break; break;
} }
trace_ioapic_mem_read(addr, size, val); trace_ioapic_mem_read(addr, s->ioregsel, size, val);
return val; return val;
} }
...@@ -335,7 +325,7 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, ...@@ -335,7 +325,7 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val,
int index; int index;
addr &= 0xff; addr &= 0xff;
trace_ioapic_mem_write(addr, size, val); trace_ioapic_mem_write(addr, s->ioregsel, size, val);
switch (addr) { switch (addr) {
case IOAPIC_IOREGSEL: case IOAPIC_IOREGSEL:
...@@ -345,7 +335,6 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, ...@@ -345,7 +335,6 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val,
if (size != 4) { if (size != 4) {
break; break;
} }
DPRINTF("write: %08x = %08" PRIx64 "\n", s->ioregsel, val);
switch (s->ioregsel) { switch (s->ioregsel) {
case IOAPIC_REG_ID: case IOAPIC_REG_ID:
s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK; s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK;
......
...@@ -18,8 +18,9 @@ apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" ...@@ -18,8 +18,9 @@ apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x"
ioapic_set_remote_irr(int n) "set remote irr for pin %d" ioapic_set_remote_irr(int n) "set remote irr for pin %d"
ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d" ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d"
ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d" ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d"
ioapic_mem_read(uint8_t addr, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32 ioapic_mem_read(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32
ioapic_mem_write(uint8_t addr, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32 ioapic_mem_write(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32
ioapic_set_irq(int vector, int level) "vector: %d level: %d"
# hw/intc/slavio_intctl.c # hw/intc/slavio_intctl.c
slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = 0x%x" slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = 0x%x"
......
...@@ -135,6 +135,8 @@ static Property vhost_user_scsi_properties[] = { ...@@ -135,6 +135,8 @@ static Property vhost_user_scsi_properties[] = {
DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev), DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev),
DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, 1), DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, 1),
DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size,
128),
DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors, DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors,
0xFFFF), 0xFFFF),
DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128), DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128),
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
typedef QemuMutex QemuRecMutex; typedef QemuMutex QemuRecMutex;
#define qemu_rec_mutex_destroy qemu_mutex_destroy #define qemu_rec_mutex_destroy qemu_mutex_destroy
#define qemu_rec_mutex_lock qemu_mutex_lock #define qemu_rec_mutex_lock qemu_mutex_lock
#define qemu_rec_mutex_try_lock qemu_mutex_try_lock #define qemu_rec_mutex_trylock qemu_mutex_trylock
#define qemu_rec_mutex_unlock qemu_mutex_unlock #define qemu_rec_mutex_unlock qemu_mutex_unlock
struct QemuMutex { struct QemuMutex {
......
...@@ -106,7 +106,7 @@ for arch in $ARCHLIST; do ...@@ -106,7 +106,7 @@ for arch in $ARCHLIST; do
if [ $arch = x86 ]; then if [ $arch = x86 ]; then
cat <<-EOF >"$output/include/standard-headers/asm-x86/hyperv.h" cat <<-EOF >"$output/include/standard-headers/asm-x86/hyperv.h"
/* this is a temporary placeholder until kvm_para.h stops including it */ /* this is a temporary placeholder until kvm_para.h stops including it */
EOF EOF
cp "$tmpdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/" cp "$tmpdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/"
cp "$tmpdir/include/asm/unistd_x32.h" "$output/linux-headers/asm-x86/" cp "$tmpdir/include/asm/unistd_x32.h" "$output/linux-headers/asm-x86/"
cp "$tmpdir/include/asm/unistd_64.h" "$output/linux-headers/asm-x86/" cp "$tmpdir/include/asm/unistd_64.h" "$output/linux-headers/asm-x86/"
......
...@@ -347,7 +347,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { ...@@ -347,7 +347,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = { .feat_names = {
"kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
"kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
NULL, NULL, NULL, NULL, NULL, "kvm-pv-tlb-flush", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
......
...@@ -91,7 +91,7 @@ bool stat64_min_slow(Stat64 *s, uint64_t value) ...@@ -91,7 +91,7 @@ bool stat64_min_slow(Stat64 *s, uint64_t value)
low = atomic_read(&s->low); low = atomic_read(&s->low);
orig = ((uint64_t)high << 32) | low; orig = ((uint64_t)high << 32) | low;
if (orig < value) { if (value < orig) {
/* We have to set low before high, just like stat64_min reads /* We have to set low before high, just like stat64_min reads
* high before low. The value may become higher temporarily, but * high before low. The value may become higher temporarily, but
* stat64_get does not notice (it takes the lock) and the only ill * stat64_get does not notice (it takes the lock) and the only ill
...@@ -120,7 +120,7 @@ bool stat64_max_slow(Stat64 *s, uint64_t value) ...@@ -120,7 +120,7 @@ bool stat64_max_slow(Stat64 *s, uint64_t value)
low = atomic_read(&s->low); low = atomic_read(&s->low);
orig = ((uint64_t)high << 32) | low; orig = ((uint64_t)high << 32) | low;
if (orig > value) { if (value > orig) {
/* We have to set low before high, just like stat64_max reads /* We have to set low before high, just like stat64_max reads
* high before low. The value may become lower temporarily, but * high before low. The value may become lower temporarily, but
* stat64_get does not notice (it takes the lock) and the only ill * stat64_get does not notice (it takes the lock) and the only ill
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册