提交 1d746ee9 编写于 作者: Peter Maydell

Merge remote-tracking branch 'remotes/famz/tags/block-and-testing-pull-request' into staging

Block and testing patches for 3.1

- aio fixes by me
- nvme fixes by Paolo and me
- test improvements by Peter, Phil and me

# gpg: Signature made Wed 15 Aug 2018 04:11:43 BST
# gpg:                using RSA key CA35624C6A9171C6
# gpg: Good signature from "Fam Zheng <famz@redhat.com>"
# Primary key fingerprint: 5003 7CB7 9706 0F76 F021  AD56 CA35 624C 6A91 71C6

* remotes/famz/tags/block-and-testing-pull-request:
  aio-posix: Improve comment around marking node deleted
  tests/vm: Add vm-build-all/vm-clean-all in help text
  tests/vm: Use make's --output-sync option
  tests/vm: Bump guest RAM up from 2G to 4G
  tests/vm: Propagate V=1 down into the make inside the VM
  tests/vm: Pass the jobs parallelism setting to 'make check'
  tests: vm: Add vm-clean-all
  tests: Add centos VM testing
  tests: Allow overriding archive path with SRC_ARCHIVE
  tests: Add an option for snapshot (default: off)
  docker: Install more packages in centos7
  aio: Do aio_notify_accept only during blocking aio_poll
  aio-posix: Don't count ctx->notifier as progress when polling
  nvme: simplify plug/unplug
  nvme: Fix nvme_init error handling
  tests/vm: Add flex and bison to the vm image
  tests/vm: Only use -cpu 'host' if KVM is available
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
...@@ -104,7 +104,7 @@ typedef struct { ...@@ -104,7 +104,7 @@ typedef struct {
uint64_t nsze; /* Namespace size reported by identify command */ uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */ int nsid; /* The namespace id to read/write data. */
uint64_t max_transfer; uint64_t max_transfer;
int plugged; bool plugged;
CoMutex dma_map_lock; CoMutex dma_map_lock;
CoQueue dma_flush_queue; CoQueue dma_flush_queue;
...@@ -569,13 +569,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -569,13 +569,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->vfio = qemu_vfio_open_pci(device, errp); s->vfio = qemu_vfio_open_pci(device, errp);
if (!s->vfio) { if (!s->vfio) {
ret = -EINVAL; ret = -EINVAL;
goto fail; goto out;
} }
s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp); s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
if (!s->regs) { if (!s->regs) {
ret = -EINVAL; ret = -EINVAL;
goto fail; goto out;
} }
/* Perform initialize sequence as described in NVMe spec "7.6.1 /* Perform initialize sequence as described in NVMe spec "7.6.1
...@@ -585,7 +585,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -585,7 +585,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
if (!(cap & (1ULL << 37))) { if (!(cap & (1ULL << 37))) {
error_setg(errp, "Device doesn't support NVMe command set"); error_setg(errp, "Device doesn't support NVMe command set");
ret = -EINVAL; ret = -EINVAL;
goto fail; goto out;
} }
s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF))); s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
...@@ -603,7 +603,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -603,7 +603,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
PRId64 " ms)", PRId64 " ms)",
timeout_ms); timeout_ms);
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
goto fail; goto out;
} }
} }
...@@ -613,7 +613,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -613,7 +613,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp); s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
if (!s->queues[0]) { if (!s->queues[0]) {
ret = -EINVAL; ret = -EINVAL;
goto fail; goto out;
} }
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000); QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE); s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
...@@ -633,14 +633,14 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -633,14 +633,14 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
PRId64 " ms)", PRId64 " ms)",
timeout_ms); timeout_ms);
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
goto fail_queue; goto out;
} }
} }
ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier, ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
VFIO_PCI_MSIX_IRQ_INDEX, errp); VFIO_PCI_MSIX_IRQ_INDEX, errp);
if (ret) { if (ret) {
goto fail_queue; goto out;
} }
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier, aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, nvme_handle_event, nvme_poll_cb); false, nvme_handle_event, nvme_poll_cb);
...@@ -649,30 +649,15 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, ...@@ -649,30 +649,15 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
if (local_err) { if (local_err) {
error_propagate(errp, local_err); error_propagate(errp, local_err);
ret = -EIO; ret = -EIO;
goto fail_handler; goto out;
} }
/* Set up command queues. */ /* Set up command queues. */
if (!nvme_add_io_queue(bs, errp)) { if (!nvme_add_io_queue(bs, errp)) {
ret = -EIO; ret = -EIO;
goto fail_handler;
}
return 0;
fail_handler:
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, NULL, NULL);
fail_queue:
nvme_free_queue_pair(bs, s->queues[0]);
fail:
g_free(s->queues);
if (s->regs) {
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
} }
if (s->vfio) { out:
qemu_vfio_close(s->vfio); /* Cleaning up is done in nvme_file_open() upon error. */
}
event_notifier_cleanup(&s->irq_notifier);
return ret; return ret;
} }
...@@ -739,8 +724,10 @@ static void nvme_close(BlockDriverState *bs) ...@@ -739,8 +724,10 @@ static void nvme_close(BlockDriverState *bs)
for (i = 0; i < s->nr_queues; ++i) { for (i = 0; i < s->nr_queues; ++i) {
nvme_free_queue_pair(bs, s->queues[i]); nvme_free_queue_pair(bs, s->queues[i]);
} }
g_free(s->queues);
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier, aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, NULL, NULL); false, NULL, NULL);
event_notifier_cleanup(&s->irq_notifier);
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE); qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
qemu_vfio_close(s->vfio); qemu_vfio_close(s->vfio);
} }
...@@ -1114,7 +1101,8 @@ static void nvme_attach_aio_context(BlockDriverState *bs, ...@@ -1114,7 +1101,8 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
static void nvme_aio_plug(BlockDriverState *bs) static void nvme_aio_plug(BlockDriverState *bs)
{ {
BDRVNVMeState *s = bs->opaque; BDRVNVMeState *s = bs->opaque;
s->plugged++; assert(!s->plugged);
s->plugged = true;
} }
static void nvme_aio_unplug(BlockDriverState *bs) static void nvme_aio_unplug(BlockDriverState *bs)
...@@ -1122,14 +1110,13 @@ static void nvme_aio_unplug(BlockDriverState *bs) ...@@ -1122,14 +1110,13 @@ static void nvme_aio_unplug(BlockDriverState *bs)
int i; int i;
BDRVNVMeState *s = bs->opaque; BDRVNVMeState *s = bs->opaque;
assert(s->plugged); assert(s->plugged);
if (!--s->plugged) { s->plugged = false;
for (i = 1; i < s->nr_queues; i++) { for (i = 1; i < s->nr_queues; i++) {
NVMeQueuePair *q = s->queues[i]; NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock); qemu_mutex_lock(&q->lock);
nvme_kick(s, q); nvme_kick(s, q);
nvme_process_completion(s, q); nvme_process_completion(s, q);
qemu_mutex_unlock(&q->lock); qemu_mutex_unlock(&q->lock);
}
} }
} }
......
...@@ -434,6 +434,7 @@ Debugging ...@@ -434,6 +434,7 @@ Debugging
Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive
debugging and verbose output. If this is not enough, see the next section. debugging and verbose output. If this is not enough, see the next section.
``V=1`` will be propagated down into the make jobs in the guest.
Manual invocation Manual invocation
----------------- -----------------
......
...@@ -27,8 +27,11 @@ DOCKER_SRC_COPY := $(BUILD_DIR)/docker-src.$(CUR_TIME) ...@@ -27,8 +27,11 @@ DOCKER_SRC_COPY := $(BUILD_DIR)/docker-src.$(CUR_TIME)
$(DOCKER_SRC_COPY): $(DOCKER_SRC_COPY):
@mkdir $@ @mkdir $@
$(call quiet-command, cd $(SRC_PATH) && scripts/archive-source.sh $@/qemu.tar, \ $(if $(SRC_ARCHIVE), \
"GEN", "$@/qemu.tar") $(call quiet-command, cp "$(SRC_ARCHIVE)" $@/qemu.tar, \
"CP", "$@/qemu.tar"), \
$(call quiet-command, cd $(SRC_PATH) && scripts/archive-source.sh $@/qemu.tar, \
"GEN", "$@/qemu.tar"))
$(call quiet-command, cp $(SRC_PATH)/tests/docker/run $@/run, \ $(call quiet-command, cp $(SRC_PATH)/tests/docker/run $@/run, \
"COPY","RUNNER") "COPY","RUNNER")
......
...@@ -3,6 +3,7 @@ RUN yum install -y epel-release centos-release-xen ...@@ -3,6 +3,7 @@ RUN yum install -y epel-release centos-release-xen
RUN yum -y update RUN yum -y update
ENV PACKAGES \ ENV PACKAGES \
bison \ bison \
bzip2 \
bzip2-devel \ bzip2-devel \
ccache \ ccache \
csnappy-devel \ csnappy-devel \
...@@ -12,10 +13,12 @@ ENV PACKAGES \ ...@@ -12,10 +13,12 @@ ENV PACKAGES \
gettext \ gettext \
git \ git \
glib2-devel \ glib2-devel \
libaio-devel \
libepoxy-devel \ libepoxy-devel \
libfdt-devel \ libfdt-devel \
librdmacm-devel \ librdmacm-devel \
lzo-devel \ lzo-devel \
nettle-devel \
make \ make \
mesa-libEGL-devel \ mesa-libEGL-devel \
mesa-libgbm-devel \ mesa-libgbm-devel \
......
# Makefile for VM tests # Makefile for VM tests
.PHONY: vm-build-all .PHONY: vm-build-all vm-clean-all
IMAGES := ubuntu.i386 freebsd netbsd openbsd IMAGES := ubuntu.i386 freebsd netbsd openbsd centos
IMAGE_FILES := $(patsubst %, tests/vm/%.img, $(IMAGES)) IMAGE_FILES := $(patsubst %, tests/vm/%.img, $(IMAGES))
.PRECIOUS: $(IMAGE_FILES) .PRECIOUS: $(IMAGE_FILES)
...@@ -14,9 +14,16 @@ vm-test: ...@@ -14,9 +14,16 @@ vm-test:
@echo " vm-build-freebsd - Build QEMU in FreeBSD VM" @echo " vm-build-freebsd - Build QEMU in FreeBSD VM"
@echo " vm-build-netbsd - Build QEMU in NetBSD VM" @echo " vm-build-netbsd - Build QEMU in NetBSD VM"
@echo " vm-build-openbsd - Build QEMU in OpenBSD VM" @echo " vm-build-openbsd - Build QEMU in OpenBSD VM"
@echo " vm-build-centos - Build QEMU in CentOS VM, with Docker"
@echo ""
@echo " vm-build-all - Build QEMU in all VMs"
@echo " vm-clean-all - Clean up VM images"
vm-build-all: $(addprefix vm-build-, $(IMAGES)) vm-build-all: $(addprefix vm-build-, $(IMAGES))
vm-clean-all:
rm -f $(IMAGE_FILES)
tests/vm/%.img: $(SRC_PATH)/tests/vm/% \ tests/vm/%.img: $(SRC_PATH)/tests/vm/% \
$(SRC_PATH)/tests/vm/basevm.py \ $(SRC_PATH)/tests/vm/basevm.py \
$(SRC_PATH)/tests/vm/Makefile.include $(SRC_PATH)/tests/vm/Makefile.include
...@@ -36,6 +43,7 @@ vm-build-%: tests/vm/%.img ...@@ -36,6 +43,7 @@ vm-build-%: tests/vm/%.img
$(if $(V)$(DEBUG), --debug) \ $(if $(V)$(DEBUG), --debug) \
$(if $(DEBUG), --interactive) \ $(if $(DEBUG), --interactive) \
$(if $(J),--jobs $(J)) \ $(if $(J),--jobs $(J)) \
$(if $(V),--verbose) \
--image "$<" \ --image "$<" \
--build-qemu $(SRC_PATH), \ --build-qemu $(SRC_PATH), \
" VM-BUILD $*") " VM-BUILD $*")
......
...@@ -64,8 +64,7 @@ class BaseVM(object): ...@@ -64,8 +64,7 @@ class BaseVM(object):
else: else:
self._stdout = self._devnull self._stdout = self._devnull
self._args = [ \ self._args = [ \
"-nodefaults", "-m", "2G", "-nodefaults", "-m", "4G",
"-cpu", "host",
"-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22", "-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22",
"-device", "virtio-net-pci,netdev=vnet", "-device", "virtio-net-pci,netdev=vnet",
"-vnc", "127.0.0.1:0,to=20", "-vnc", "127.0.0.1:0,to=20",
...@@ -73,9 +72,11 @@ class BaseVM(object): ...@@ -73,9 +72,11 @@ class BaseVM(object):
if vcpus: if vcpus:
self._args += ["-smp", str(vcpus)] self._args += ["-smp", str(vcpus)]
if os.access("/dev/kvm", os.R_OK | os.W_OK): if os.access("/dev/kvm", os.R_OK | os.W_OK):
self._args += ["-cpu", "host"]
self._args += ["-enable-kvm"] self._args += ["-enable-kvm"]
else: else:
logging.info("KVM not available, not using -enable-kvm") logging.info("KVM not available, not using -enable-kvm")
self._args += ["-cpu", "max"]
self._data_args = [] self._data_args = []
def _download_with_cache(self, url, sha256sum=None): def _download_with_cache(self, url, sha256sum=None):
...@@ -210,12 +211,16 @@ def parse_args(vm_name): ...@@ -210,12 +211,16 @@ def parse_args(vm_name):
help="force build image even if image exists") help="force build image even if image exists")
parser.add_option("--jobs", type=int, default=multiprocessing.cpu_count() / 2, parser.add_option("--jobs", type=int, default=multiprocessing.cpu_count() / 2,
help="number of virtual CPUs") help="number of virtual CPUs")
parser.add_option("--verbose", "-V", action="store_true",
help="Pass V=1 to builds within the guest")
parser.add_option("--build-image", "-b", action="store_true", parser.add_option("--build-image", "-b", action="store_true",
help="build image") help="build image")
parser.add_option("--build-qemu", parser.add_option("--build-qemu",
help="build QEMU from source in guest") help="build QEMU from source in guest")
parser.add_option("--interactive", "-I", action="store_true", parser.add_option("--interactive", "-I", action="store_true",
help="Interactively run command") help="Interactively run command")
parser.add_option("--snapshot", "-s", action="store_true",
help="run tests with a snapshot")
parser.disable_interspersed_args() parser.disable_interspersed_args()
return parser.parse_args() return parser.parse_args()
...@@ -238,10 +243,14 @@ def main(vmcls): ...@@ -238,10 +243,14 @@ def main(vmcls):
vm.add_source_dir(args.build_qemu) vm.add_source_dir(args.build_qemu)
cmd = [vm.BUILD_SCRIPT.format( cmd = [vm.BUILD_SCRIPT.format(
configure_opts = " ".join(argv), configure_opts = " ".join(argv),
jobs=args.jobs)] jobs=args.jobs,
verbose = "V=1" if args.verbose else "")]
else: else:
cmd = argv cmd = argv
vm.boot(args.image + ",snapshot=on") img = args.image
if args.snapshot:
img += ",snapshot=on"
vm.boot(img)
vm.wait_ssh() vm.wait_ssh()
except Exception as e: except Exception as e:
if isinstance(e, SystemExit) and e.code == 0: if isinstance(e, SystemExit) and e.code == 0:
......
#!/usr/bin/env python
#
# CentOS image
#
# Copyright 2018 Red Hat Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
import time
class CentosVM(basevm.BaseVM):
    """CentOS 7 guest VM used to run the QEMU docker-based tests.

    The guest image is built from the official CentOS cloud image,
    provisioned via cloud-init, and runs the docker test targets from
    a source archive attached as a second virtio disk (/dev/vdb).
    """
    name = "centos"
    # Shell script executed inside the guest over SSH by basevm.main().
    # {jobs} and {verbose} are substituted by str.format(); {verbose} is
    # already the full "V=1" assignment (or the empty string), matching
    # the other VM scripts — so it must be interpolated bare, not as
    # "V={verbose}" which would expand to the bogus "V=V=1" / "V=".
    BUILD_SCRIPT = """
        set -e;
        cd $(mktemp -d);
        export SRC_ARCHIVE=/dev/vdb;
        sudo chmod a+r $SRC_ARCHIVE;
        tar -xf $SRC_ARCHIVE;
        make docker-test-block@centos7 {verbose} J={jobs};
        make docker-test-quick@centos7 {verbose} J={jobs};
        make docker-test-mingw@fedora {verbose} J={jobs};
    """

    def _gen_cloud_init_iso(self):
        """Generate a cloud-init NoCloud seed ISO (user-data + meta-data).

        Returns the path of the generated ISO inside the VM's temp dir.
        Requires the genisoimage tool on the host.
        """
        cidir = self._tmpdir
        mdata = open(os.path.join(cidir, "meta-data"), "w")
        mdata.writelines(["instance-id: centos-vm-0\n",
                          "local-hostname: centos-guest\n"])
        mdata.close()
        udata = open(os.path.join(cidir, "user-data"), "w")
        # Set passwords and install the SSH key for both the unprivileged
        # guest user and root, so basevm can log in either way.
        udata.writelines(["#cloud-config\n",
                          "chpasswd:\n",
                          "  list: |\n",
                          "    root:%s\n" % self.ROOT_PASS,
                          "    %s:%s\n" % (self.GUEST_USER, self.GUEST_PASS),
                          "  expire: False\n",
                          "users:\n",
                          "  - name: %s\n" % self.GUEST_USER,
                          "    sudo: ALL=(ALL) NOPASSWD:ALL\n",
                          "    ssh-authorized-keys:\n",
                          "    - %s\n" % basevm.SSH_PUB_KEY,
                          "  - name: root\n",
                          "    ssh-authorized-keys:\n",
                          "    - %s\n" % basevm.SSH_PUB_KEY,
                          "locale: en_US.UTF-8\n"])
        udata.close()
        # Volume id "cidata" is what cloud-init's NoCloud datasource probes.
        subprocess.check_call(["genisoimage", "-output", "cloud-init.iso",
                               "-volid", "cidata", "-joliet", "-rock",
                               "user-data", "meta-data"],
                              cwd=cidir,
                              stdin=self._devnull, stdout=self._stdout,
                              stderr=self._stdout)
        return os.path.join(cidir, "cloud-init.iso")

    def build_image(self, img):
        """Download the CentOS cloud image and provision it for testing.

        Boots the image once with the cloud-init seed ISO, installs
        docker/make/git, then powers off and atomically moves the
        provisioned image into place.  Returns 0 on success.
        """
        cimg = self._download_with_cache("https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1802.qcow2.xz")
        img_tmp = img + ".tmp"
        subprocess.check_call(["cp", "-f", cimg, img_tmp + ".xz"])
        subprocess.check_call(["xz", "-df", img_tmp + ".xz"])
        subprocess.check_call(["qemu-img", "resize", img_tmp, "50G"])
        self.boot(img_tmp, extra_args = ["-cdrom", self._gen_cloud_init_iso()])
        self.wait_ssh()
        # Disable cloud-init on subsequent boots; provisioning is one-shot.
        self.ssh_root_check("touch /etc/cloud/cloud-init.disabled")
        self.ssh_root_check("yum update -y")
        self.ssh_root_check("yum install -y docker make git")
        self.ssh_root_check("systemctl enable docker")
        self.ssh_root("poweroff")
        self.wait()
        if os.path.exists(img):
            os.remove(img)
        os.rename(img_tmp, img)
        return 0
# Entry point: basevm.main() parses the common VM-test command line and
# drives image build / test run using this VM class.
if __name__ == "__main__":
    sys.exit(basevm.main(CentosVM))
...@@ -23,8 +23,8 @@ class FreeBSDVM(basevm.BaseVM): ...@@ -23,8 +23,8 @@ class FreeBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX); cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/vtbd1; tar -xf /dev/vtbd1;
./configure {configure_opts}; ./configure {configure_opts};
gmake -j{jobs}; gmake --output-sync -j{jobs} {verbose};
gmake check; gmake --output-sync -j{jobs} check {verbose};
""" """
def build_image(self, img): def build_image(self, img):
......
...@@ -23,8 +23,8 @@ class NetBSDVM(basevm.BaseVM): ...@@ -23,8 +23,8 @@ class NetBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX); cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/rld1a; tar -xf /dev/rld1a;
./configure --python=python2.7 {configure_opts}; ./configure --python=python2.7 {configure_opts};
gmake -j{jobs}; gmake --output-sync -j{jobs} {verbose};
gmake check; gmake --output-sync -j{jobs} check {verbose};
""" """
def build_image(self, img): def build_image(self, img):
......
...@@ -23,9 +23,9 @@ class OpenBSDVM(basevm.BaseVM): ...@@ -23,9 +23,9 @@ class OpenBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX); cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/rsd1c; tar -xf /dev/rsd1c;
./configure --cc=x86_64-unknown-openbsd6.1-gcc-4.9.4 --python=python2.7 {configure_opts}; ./configure --cc=x86_64-unknown-openbsd6.1-gcc-4.9.4 --python=python2.7 {configure_opts};
gmake -j{jobs}; gmake --output-sync -j{jobs} {verbose};
# XXX: "gmake check" seems to always hang or fail # XXX: "gmake check" seems to always hang or fail
#gmake check; #gmake --output-sync -j{jobs} check {verbose};
""" """
def build_image(self, img): def build_image(self, img):
......
...@@ -25,8 +25,8 @@ class UbuntuX86VM(basevm.BaseVM): ...@@ -25,8 +25,8 @@ class UbuntuX86VM(basevm.BaseVM):
sudo chmod a+r /dev/vdb; sudo chmod a+r /dev/vdb;
tar -xf /dev/vdb; tar -xf /dev/vdb;
./configure {configure_opts}; ./configure {configure_opts};
make -j{jobs}; make --output-sync -j{jobs};
make check; make --output-sync check -j{jobs} {verbose};
""" """
def _gen_cloud_init_iso(self): def _gen_cloud_init_iso(self):
...@@ -77,7 +77,7 @@ class UbuntuX86VM(basevm.BaseVM): ...@@ -77,7 +77,7 @@ class UbuntuX86VM(basevm.BaseVM):
# The previous update sometimes doesn't survive a reboot, so do it again # The previous update sometimes doesn't survive a reboot, so do it again
self.ssh_root_check("apt-get update") self.ssh_root_check("apt-get update")
self.ssh_root_check("apt-get build-dep -y qemu") self.ssh_root_check("apt-get build-dep -y qemu")
self.ssh_root_check("apt-get install -y libfdt-dev") self.ssh_root_check("apt-get install -y libfdt-dev flex bison")
self.ssh_root("poweroff") self.ssh_root("poweroff")
self.wait() self.wait()
if os.path.exists(img): if os.path.exists(img):
......
...@@ -232,7 +232,7 @@ void aio_set_fd_handler(AioContext *ctx, ...@@ -232,7 +232,7 @@ void aio_set_fd_handler(AioContext *ctx,
g_source_remove_poll(&ctx->source, &node->pfd); g_source_remove_poll(&ctx->source, &node->pfd);
} }
/* If the lock is held, just mark the node as deleted */ /* If a read is in progress, just mark the node as deleted */
if (qemu_lockcnt_count(&ctx->list_lock)) { if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1; node->deleted = 1;
node->pfd.revents = 0; node->pfd.revents = 0;
...@@ -494,7 +494,8 @@ static bool run_poll_handlers_once(AioContext *ctx) ...@@ -494,7 +494,8 @@ static bool run_poll_handlers_once(AioContext *ctx)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll && if (!node->deleted && node->io_poll &&
aio_node_check(ctx, node->is_external) && aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque)) { node->io_poll(node->opaque) &&
node->opaque != &ctx->notifier) {
progress = true; progress = true;
} }
...@@ -590,6 +591,7 @@ bool aio_poll(AioContext *ctx, bool blocking) ...@@ -590,6 +591,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
* so disable the optimization now. * so disable the optimization now.
*/ */
if (blocking) { if (blocking) {
assert(in_aio_context_home_thread(ctx));
atomic_add(&ctx->notify_me, 2); atomic_add(&ctx->notify_me, 2);
} }
...@@ -632,6 +634,7 @@ bool aio_poll(AioContext *ctx, bool blocking) ...@@ -632,6 +634,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
if (blocking) { if (blocking) {
atomic_sub(&ctx->notify_me, 2); atomic_sub(&ctx->notify_me, 2);
aio_notify_accept(ctx);
} }
/* Adjust polling time */ /* Adjust polling time */
...@@ -675,8 +678,6 @@ bool aio_poll(AioContext *ctx, bool blocking) ...@@ -675,8 +678,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
} }
} }
aio_notify_accept(ctx);
/* if we have any readable fds, dispatch event */ /* if we have any readable fds, dispatch event */
if (ret > 0) { if (ret > 0) {
for (i = 0; i < npfd; i++) { for (i = 0; i < npfd; i++) {
......
...@@ -373,11 +373,12 @@ bool aio_poll(AioContext *ctx, bool blocking) ...@@ -373,11 +373,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
ret = WaitForMultipleObjects(count, events, FALSE, timeout); ret = WaitForMultipleObjects(count, events, FALSE, timeout);
if (blocking) { if (blocking) {
assert(first); assert(first);
assert(in_aio_context_home_thread(ctx));
atomic_sub(&ctx->notify_me, 2); atomic_sub(&ctx->notify_me, 2);
aio_notify_accept(ctx);
} }
if (first) { if (first) {
aio_notify_accept(ctx);
progress |= aio_bh_poll(ctx); progress |= aio_bh_poll(ctx);
first = false; first = false;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册