Commit 1d746ee9 authored by Peter Maydell

Merge remote-tracking branch 'remotes/famz/tags/block-and-testing-pull-request' into staging

Block and testing patches for 3.1

- aio fixes by me
- nvme fixes by Paolo and me
- test improvements by Peter, Phil and me

# gpg: Signature made Wed 15 Aug 2018 04:11:43 BST
# gpg:                using RSA key CA35624C6A9171C6
# gpg: Good signature from "Fam Zheng <famz@redhat.com>"
# Primary key fingerprint: 5003 7CB7 9706 0F76 F021  AD56 CA35 624C 6A91 71C6

* remotes/famz/tags/block-and-testing-pull-request:
  aio-posix: Improve comment around marking node deleted
  tests/vm: Add vm-build-all/vm-clean-all in help text
  tests/vm: Use make's --output-sync option
  tests/vm: Bump guest RAM up from 2G to 4G
  tests/vm: Propagate V=1 down into the make inside the VM
  tests/vm: Pass the jobs parallelism setting to 'make check'
  tests: vm: Add vm-clean-all
  tests: Add centos VM testing
  tests: Allow overriding archive path with SRC_ARCHIVE
  tests: Add an option for snapshot (default: off)
  docker: Install more packages in centos7
  aio: Do aio_notify_accept only during blocking aio_poll
  aio-posix: Don't count ctx->notifier as progress when polling
  nvme: simplify plug/unplug
  nvme: Fix nvme_init error handling
  tests/vm: Add flex and bison to the vm image
  tests/vm: Only use -cpu 'host' if KVM is available
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
@@ -104,7 +104,7 @@ typedef struct {
uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */
uint64_t max_transfer;
int plugged;
bool plugged;
CoMutex dma_map_lock;
CoQueue dma_flush_queue;
@@ -569,13 +569,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->vfio = qemu_vfio_open_pci(device, errp);
if (!s->vfio) {
ret = -EINVAL;
goto fail;
goto out;
}
s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
if (!s->regs) {
ret = -EINVAL;
goto fail;
goto out;
}
/* Perform initialize sequence as described in NVMe spec "7.6.1
@@ -585,7 +585,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
if (!(cap & (1ULL << 37))) {
error_setg(errp, "Device doesn't support NVMe command set");
ret = -EINVAL;
goto fail;
goto out;
}
s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
@@ -603,7 +603,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
PRId64 " ms)",
timeout_ms);
ret = -ETIMEDOUT;
goto fail;
goto out;
}
}
@@ -613,7 +613,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
if (!s->queues[0]) {
ret = -EINVAL;
goto fail;
goto out;
}
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
@@ -633,14 +633,14 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
PRId64 " ms)",
timeout_ms);
ret = -ETIMEDOUT;
goto fail_queue;
goto out;
}
}
ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
VFIO_PCI_MSIX_IRQ_INDEX, errp);
if (ret) {
goto fail_queue;
goto out;
}
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, nvme_handle_event, nvme_poll_cb);
@@ -649,30 +649,15 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
if (local_err) {
error_propagate(errp, local_err);
ret = -EIO;
goto fail_handler;
goto out;
}
/* Set up command queues. */
if (!nvme_add_io_queue(bs, errp)) {
ret = -EIO;
goto fail_handler;
}
return 0;
fail_handler:
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, NULL, NULL);
fail_queue:
nvme_free_queue_pair(bs, s->queues[0]);
fail:
g_free(s->queues);
if (s->regs) {
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
}
if (s->vfio) {
qemu_vfio_close(s->vfio);
}
event_notifier_cleanup(&s->irq_notifier);
out:
/* Cleaning up is done in nvme_file_open() upon error. */
return ret;
}
@@ -739,8 +724,10 @@ static void nvme_close(BlockDriverState *bs)
for (i = 0; i < s->nr_queues; ++i) {
nvme_free_queue_pair(bs, s->queues[i]);
}
g_free(s->queues);
aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
false, NULL, NULL);
event_notifier_cleanup(&s->irq_notifier);
qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
qemu_vfio_close(s->vfio);
}
@@ -1114,7 +1101,8 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
static void nvme_aio_plug(BlockDriverState *bs)
{
BDRVNVMeState *s = bs->opaque;
s->plugged++;
assert(!s->plugged);
s->plugged = true;
}
static void nvme_aio_unplug(BlockDriverState *bs)
@@ -1122,14 +1110,13 @@ static void nvme_aio_unplug(BlockDriverState *bs)
int i;
BDRVNVMeState *s = bs->opaque;
assert(s->plugged);
if (!--s->plugged) {
for (i = 1; i < s->nr_queues; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(s, q);
nvme_process_completion(s, q);
qemu_mutex_unlock(&q->lock);
}
s->plugged = false;
for (i = 1; i < s->nr_queues; i++) {
NVMeQueuePair *q = s->queues[i];
qemu_mutex_lock(&q->lock);
nvme_kick(s, q);
nvme_process_completion(s, q);
qemu_mutex_unlock(&q->lock);
}
}
......
@@ -434,6 +434,7 @@ Debugging
Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive
debugging and verbose output. If this is not enough, see the next section.
``V=1`` will be propagated down into the make jobs in the guest.
Manual invocation
-----------------
......
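(Usage sketch only, not part of the commit: combining the documented options with the vm-build targets added in the VM Makefile further down, a verbose, interactive build inside the CentOS guest could be requested from the host roughly as follows; the job count of 8 is an arbitrary example value.)

    # host side: build QEMU in the CentOS VM, propagating V=1 into the guest make jobs
    make vm-build-centos V=1 DEBUG=1 J=8
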
@@ -27,8 +27,11 @@ DOCKER_SRC_COPY := $(BUILD_DIR)/docker-src.$(CUR_TIME)
$(DOCKER_SRC_COPY):
@mkdir $@
$(call quiet-command, cd $(SRC_PATH) && scripts/archive-source.sh $@/qemu.tar, \
"GEN", "$@/qemu.tar")
$(if $(SRC_ARCHIVE), \
$(call quiet-command, cp "$(SRC_ARCHIVE)" $@/qemu.tar, \
"CP", "$@/qemu.tar"), \
$(call quiet-command, cd $(SRC_PATH) && scripts/archive-source.sh $@/qemu.tar, \
"GEN", "$@/qemu.tar"))
$(call quiet-command, cp $(SRC_PATH)/tests/docker/run $@/run, \
"COPY","RUNNER")
......
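(Illustrative only: the SRC_ARCHIVE hook added above lets a pre-built source tarball be reused instead of re-running archive-source.sh, which is what the CentOS VM script below relies on. A hypothetical host-side invocation, with /tmp/qemu.tar standing in for an existing archive:)

    # reuse an existing source archive instead of regenerating it
    make docker-test-quick@centos7 SRC_ARCHIVE=/tmp/qemu.tar J=8
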
@@ -3,6 +3,7 @@ RUN yum install -y epel-release centos-release-xen
RUN yum -y update
ENV PACKAGES \
bison \
bzip2 \
bzip2-devel \
ccache \
csnappy-devel \
@@ -12,10 +13,12 @@ ENV PACKAGES \
gettext \
git \
glib2-devel \
libaio-devel \
libepoxy-devel \
libfdt-devel \
librdmacm-devel \
lzo-devel \
nettle-devel \
make \
mesa-libEGL-devel \
mesa-libgbm-devel \
......
# Makefile for VM tests
.PHONY: vm-build-all
.PHONY: vm-build-all vm-clean-all
IMAGES := ubuntu.i386 freebsd netbsd openbsd
IMAGES := ubuntu.i386 freebsd netbsd openbsd centos
IMAGE_FILES := $(patsubst %, tests/vm/%.img, $(IMAGES))
.PRECIOUS: $(IMAGE_FILES)
@@ -14,9 +14,16 @@ vm-test:
@echo " vm-build-freebsd - Build QEMU in FreeBSD VM"
@echo " vm-build-netbsd - Build QEMU in NetBSD VM"
@echo " vm-build-openbsd - Build QEMU in OpenBSD VM"
@echo " vm-build-centos - Build QEMU in CentOS VM, with Docker"
@echo ""
@echo " vm-build-all - Build QEMU in all VMs"
@echo " vm-clean-all - Clean up VM images"
vm-build-all: $(addprefix vm-build-, $(IMAGES))
vm-clean-all:
rm -f $(IMAGE_FILES)
tests/vm/%.img: $(SRC_PATH)/tests/vm/% \
$(SRC_PATH)/tests/vm/basevm.py \
$(SRC_PATH)/tests/vm/Makefile.include
@@ -36,6 +43,7 @@ vm-build-%: tests/vm/%.img
$(if $(V)$(DEBUG), --debug) \
$(if $(DEBUG), --interactive) \
$(if $(J),--jobs $(J)) \
$(if $(V),--verbose) \
--image "$<" \
--build-qemu $(SRC_PATH), \
" VM-BUILD $*")
......
@@ -64,8 +64,7 @@ class BaseVM(object):
else:
self._stdout = self._devnull
self._args = [ \
"-nodefaults", "-m", "2G",
"-cpu", "host",
"-nodefaults", "-m", "4G",
"-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22",
"-device", "virtio-net-pci,netdev=vnet",
"-vnc", "127.0.0.1:0,to=20",
@@ -73,9 +72,11 @@ class BaseVM(object):
if vcpus:
self._args += ["-smp", str(vcpus)]
if os.access("/dev/kvm", os.R_OK | os.W_OK):
self._args += ["-cpu", "host"]
self._args += ["-enable-kvm"]
else:
logging.info("KVM not available, not using -enable-kvm")
self._args += ["-cpu", "max"]
self._data_args = []
def _download_with_cache(self, url, sha256sum=None):
@@ -210,12 +211,16 @@ def parse_args(vm_name):
help="force build image even if image exists")
parser.add_option("--jobs", type=int, default=multiprocessing.cpu_count() / 2,
help="number of virtual CPUs")
parser.add_option("--verbose", "-V", action="store_true",
help="Pass V=1 to builds within the guest")
parser.add_option("--build-image", "-b", action="store_true",
help="build image")
parser.add_option("--build-qemu",
help="build QEMU from source in guest")
parser.add_option("--interactive", "-I", action="store_true",
help="Interactively run command")
parser.add_option("--snapshot", "-s", action="store_true",
help="run tests with a snapshot")
parser.disable_interspersed_args()
return parser.parse_args()
@@ -238,10 +243,14 @@ def main(vmcls):
vm.add_source_dir(args.build_qemu)
cmd = [vm.BUILD_SCRIPT.format(
configure_opts = " ".join(argv),
jobs=args.jobs)]
jobs=args.jobs,
verbose = "V=1" if args.verbose else "")]
else:
cmd = argv
vm.boot(args.image + ",snapshot=on")
img = args.image
if args.snapshot:
img += ",snapshot=on"
vm.boot(img)
vm.wait_ssh()
except Exception as e:
if isinstance(e, SystemExit) and e.code == 0:
......
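(A rough manual-invocation sketch combining the new --verbose and --snapshot flags with the existing --image and --build-qemu options; the image and source paths are placeholders, not taken from the commit:)

    # boot a throwaway snapshot of an existing guest image and build with V=1
    tests/vm/centos --snapshot --verbose --image centos.img --build-qemu ~/src/qemu
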
#!/usr/bin/env python
#
# CentOS image
#
# Copyright 2018 Red Hat Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
import time
class CentosVM(basevm.BaseVM):
name = "centos"
BUILD_SCRIPT = """
set -e;
cd $(mktemp -d);
export SRC_ARCHIVE=/dev/vdb;
sudo chmod a+r $SRC_ARCHIVE;
tar -xf $SRC_ARCHIVE;
make docker-test-block@centos7 V={verbose} J={jobs};
make docker-test-quick@centos7 V={verbose} J={jobs};
make docker-test-mingw@fedora V={verbose} J={jobs};
"""
def _gen_cloud_init_iso(self):
cidir = self._tmpdir
mdata = open(os.path.join(cidir, "meta-data"), "w")
mdata.writelines(["instance-id: centos-vm-0\n",
"local-hostname: centos-guest\n"])
mdata.close()
udata = open(os.path.join(cidir, "user-data"), "w")
udata.writelines(["#cloud-config\n",
"chpasswd:\n",
" list: |\n",
" root:%s\n" % self.ROOT_PASS,
" %s:%s\n" % (self.GUEST_USER, self.GUEST_PASS),
" expire: False\n",
"users:\n",
" - name: %s\n" % self.GUEST_USER,
" sudo: ALL=(ALL) NOPASSWD:ALL\n",
" ssh-authorized-keys:\n",
" - %s\n" % basevm.SSH_PUB_KEY,
" - name: root\n",
" ssh-authorized-keys:\n",
" - %s\n" % basevm.SSH_PUB_KEY,
"locale: en_US.UTF-8\n"])
udata.close()
subprocess.check_call(["genisoimage", "-output", "cloud-init.iso",
"-volid", "cidata", "-joliet", "-rock",
"user-data", "meta-data"],
cwd=cidir,
stdin=self._devnull, stdout=self._stdout,
stderr=self._stdout)
return os.path.join(cidir, "cloud-init.iso")
def build_image(self, img):
cimg = self._download_with_cache("https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1802.qcow2.xz")
img_tmp = img + ".tmp"
subprocess.check_call(["cp", "-f", cimg, img_tmp + ".xz"])
subprocess.check_call(["xz", "-df", img_tmp + ".xz"])
subprocess.check_call(["qemu-img", "resize", img_tmp, "50G"])
self.boot(img_tmp, extra_args = ["-cdrom", self._gen_cloud_init_iso()])
self.wait_ssh()
self.ssh_root_check("touch /etc/cloud/cloud-init.disabled")
self.ssh_root_check("yum update -y")
self.ssh_root_check("yum install -y docker make git")
self.ssh_root_check("systemctl enable docker")
self.ssh_root("poweroff")
self.wait()
if os.path.exists(img):
os.remove(img)
os.rename(img_tmp, img)
return 0
if __name__ == "__main__":
sys.exit(basevm.main(CentosVM))
@@ -23,8 +23,8 @@ class FreeBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/vtbd1;
./configure {configure_opts};
gmake -j{jobs};
gmake check;
gmake --output-sync -j{jobs} {verbose};
gmake --output-sync -j{jobs} check {verbose};
"""
def build_image(self, img):
......
@@ -23,8 +23,8 @@ class NetBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/rld1a;
./configure --python=python2.7 {configure_opts};
gmake -j{jobs};
gmake check;
gmake --output-sync -j{jobs} {verbose};
gmake --output-sync -j{jobs} check {verbose};
"""
def build_image(self, img):
......
@@ -23,9 +23,9 @@ class OpenBSDVM(basevm.BaseVM):
cd $(mktemp -d /var/tmp/qemu-test.XXXXXX);
tar -xf /dev/rsd1c;
./configure --cc=x86_64-unknown-openbsd6.1-gcc-4.9.4 --python=python2.7 {configure_opts};
gmake -j{jobs};
gmake --output-sync -j{jobs} {verbose};
# XXX: "gmake check" seems to always hang or fail
#gmake check;
#gmake --output-sync -j{jobs} check {verbose};
"""
def build_image(self, img):
......
@@ -25,8 +25,8 @@ class UbuntuX86VM(basevm.BaseVM):
sudo chmod a+r /dev/vdb;
tar -xf /dev/vdb;
./configure {configure_opts};
make -j{jobs};
make check;
make --output-sync -j{jobs};
make --output-sync check -j{jobs} {verbose};
"""
def _gen_cloud_init_iso(self):
@@ -77,7 +77,7 @@ class UbuntuX86VM(basevm.BaseVM):
# The previous update sometimes doesn't survive a reboot, so do it again
self.ssh_root_check("apt-get update")
self.ssh_root_check("apt-get build-dep -y qemu")
self.ssh_root_check("apt-get install -y libfdt-dev")
self.ssh_root_check("apt-get install -y libfdt-dev flex bison")
self.ssh_root("poweroff")
self.wait()
if os.path.exists(img):
......
@@ -232,7 +232,7 @@ void aio_set_fd_handler(AioContext *ctx,
g_source_remove_poll(&ctx->source, &node->pfd);
}
/* If the lock is held, just mark the node as deleted */
/* If a read is in progress, just mark the node as deleted */
if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
@@ -494,7 +494,8 @@ static bool run_poll_handlers_once(AioContext *ctx)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll &&
aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque)) {
node->io_poll(node->opaque) &&
node->opaque != &ctx->notifier) {
progress = true;
}
@@ -590,6 +591,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
* so disable the optimization now.
*/
if (blocking) {
assert(in_aio_context_home_thread(ctx));
atomic_add(&ctx->notify_me, 2);
}
@@ -632,6 +634,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
aio_notify_accept(ctx);
}
/* Adjust polling time */
@@ -675,8 +678,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
aio_notify_accept(ctx);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
for (i = 0; i < npfd; i++) {
......
@@ -373,11 +373,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
if (blocking) {
assert(first);
assert(in_aio_context_home_thread(ctx));
atomic_sub(&ctx->notify_me, 2);
aio_notify_accept(ctx);
}
if (first) {
aio_notify_accept(ctx);
progress |= aio_bh_poll(ctx);
first = false;
}
......