Commit d8e3b729 authored by Peter Maydell

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc, acpi, virtio

Most notably this includes virtio 1 patches
Still not all devices converted, and not fully spec compliant,
so disabled by default.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu Jun 11 12:53:08 2015 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (42 commits)
  i386/acpi-build: fix PXB workarounds for unsupported BIOSes
  i386/acpi-build: more traditional _UID and _HID for PXB root buses
  vhost-scsi: move qdev properties into vhost-scsi.c
  virtio-9p-device: move qdev properties into virtio-9p-device.c
  virtio-serial-bus: move qdev properties into virtio-serial-bus.c
  virtio-rng: move qdev properties into virtio-rng.c
  virtio-scsi: move qdev properties into virtio-scsi.c
  virtio-net.h: Remove unused DEFINE_VIRTIO_NET_PROPERTIES
  virtio-net: move qdev properties into virtio-net.c
  virtio-input: emulated devices [pci]
  virtio-input: core code & base class [pci]
  pci: add PCI_CLASS_INPUT_*
  virtio-pci: fill VirtIOPCIRegions early.
  virtio-pci: drop identical virtio_pci_cap
  virtio-pci: move cap type to VirtIOPCIRegion
  virtio-pci: move virtio_pci_add_mem_cap call to virtio_pci_modern_region_map
  virtio-pci: add virtio_pci_modern_region_map()
  virtio-pci: add virtio_pci_modern_regions_init()
  virtio-pci: add struct VirtIOPCIRegion for virtio-1 regions
  virtio-balloon: switch to virtio_add_feature
  ...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
......@@ -140,7 +140,8 @@ out:
/* virtio-9p device */
static Property virtio_9p_properties[] = {
DEFINE_VIRTIO_9P_PROPERTIES(V9fsState, fsconf),
DEFINE_PROP_STRING("mount_tag", V9fsState, fsconf.tag),
DEFINE_PROP_STRING("fsdev", V9fsState, fsconf.fsdev_id),
DEFINE_PROP_END_OF_LIST(),
};
......
......@@ -391,8 +391,4 @@ extern int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
#define VIRTIO_9P(obj) \
OBJECT_CHECK(V9fsState, (obj), TYPE_VIRTIO_9P)
#define DEFINE_VIRTIO_9P_PROPERTIES(_state, _field) \
DEFINE_PROP_STRING("mount_tag", _state, _field.tag), \
DEFINE_PROP_STRING("fsdev", _state, _field.fsdev_id)
#endif
......@@ -1083,7 +1083,8 @@ static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
}
static Property virtio_serial_properties[] = {
DEFINE_VIRTIO_SERIAL_PROPERTIES(VirtIOSerial, serial),
DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
31),
DEFINE_PROP_END_OF_LIST(),
};
......
......@@ -833,7 +833,7 @@ static Aml *build_crs(PCIHostState *host,
* Work-around for old bioses
* that do not support multiple root buses
*/
if (range_base || range_base > range_limit) {
if (range_base && range_base <= range_limit) {
aml_append(crs,
aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
AML_POS_DECODE, AML_ENTIRE_RANGE,
......@@ -854,7 +854,7 @@ static Aml *build_crs(PCIHostState *host,
* Work-around for old bioses
* that do not support multiple root buses
*/
if (range_base || range_base > range_limit) {
if (range_base && range_base <= range_limit) {
aml_append(crs,
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
AML_MAX_FIXED, AML_NON_CACHEABLE,
......@@ -865,7 +865,7 @@ static Aml *build_crs(PCIHostState *host,
0,
range_limit - range_base + 1));
crs_range_insert(mem_ranges, range_base, range_limit);
}
}
range_base =
pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
......@@ -876,7 +876,7 @@ static Aml *build_crs(PCIHostState *host,
* Work-around for old bioses
* that do not support multiple root buses
*/
if (range_base || range_base > range_limit) {
if (range_base && range_base <= range_limit) {
aml_append(crs,
aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
AML_MAX_FIXED, AML_NON_CACHEABLE,
......@@ -945,9 +945,8 @@ build_ssdt(GArray *table_data, GArray *linker,
scope = aml_scope("\\_SB");
dev = aml_device("PC%.02X", bus_num);
aml_append(dev,
aml_name_decl("_UID", aml_string("PC%.02X", bus_num)));
aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A03")));
aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
if (numa_node != NUMA_NODE_UNASSIGNED) {
......
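The build_crs() hunks above flip the guarded condition from `range_base || range_base > range_limit` to `range_base && range_base <= range_limit`, so the workaround only emits a resource descriptor for a non-empty, well-ordered bridge window. A minimal standalone sketch of the two predicates (plain C, outside QEMU; the sample window values are made up):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Old predicate from build_crs(): fires even when the window is empty
 * or inverted (base > limit), which is exactly the case it meant to skip. */
static bool old_check(uint64_t range_base, uint64_t range_limit)
{
    return range_base || range_base > range_limit;
}

/* Fixed predicate: only describe a non-empty, well-ordered window. */
static bool new_check(uint64_t range_base, uint64_t range_limit)
{
    return range_base && range_base <= range_limit;
}

int main(void)
{
    /* An unassigned bridge window typically reads back with base > limit. */
    printf("inverted window: old=%d new=%d\n",
           old_check(0xc000, 0xbfff), new_check(0xc000, 0xbfff));
    /* A populated window should still be emitted. */
    printf("valid window:    old=%d new=%d\n",
           old_check(0xc000, 0xcfff), new_check(0xc000, 0xcfff));
    return 0;
}
```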
......@@ -52,6 +52,7 @@ static const int kernel_feature_bits[] = {
VIRTIO_RING_F_INDIRECT_DESC,
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_F_VERSION_1,
VHOST_INVALID_FEATURE_BIT
};
......@@ -62,6 +63,7 @@ static const int user_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_F_ANY_LAYOUT,
VIRTIO_F_VERSION_1,
VIRTIO_NET_F_CSUM,
VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO,
......@@ -107,13 +109,13 @@ static const int *vhost_net_get_feature_bits(struct vhost_net *net)
return feature_bits;
}
unsigned vhost_net_get_features(struct vhost_net *net, unsigned features)
uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
features);
}
void vhost_net_ack_features(struct vhost_net *net, unsigned features)
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
net->dev.acked_features = net->dev.backend_features;
vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
......@@ -147,7 +149,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
goto fail;
}
net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
? 0 : (1 << VHOST_NET_F_VIRTIO_NET_HDR);
? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
net->backend = r;
} else {
net->dev.backend_features = 0;
......@@ -167,7 +169,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
if (backend_kernel) {
if (!qemu_has_vnet_hdr_len(options->net_backend,
sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
net->dev.features &= ~(1 << VIRTIO_NET_F_MRG_RXBUF);
net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
if (~net->dev.features & net->dev.backend_features) {
fprintf(stderr, "vhost lacks feature mask %" PRIu64
......@@ -431,11 +433,11 @@ void vhost_net_cleanup(struct vhost_net *net)
{
}
unsigned vhost_net_get_features(struct vhost_net *net, unsigned features)
uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
return features;
}
void vhost_net_ack_features(struct vhost_net *net, unsigned features)
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
}
......
......@@ -87,6 +87,7 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
memcpy(&netcfg, config, n->config_size);
if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) &&
memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
memcpy(n->mac, netcfg.mac, ETH_ALEN);
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
......@@ -366,15 +367,21 @@ static int peer_has_ufo(VirtIONet *n)
return n->has_ufo;
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
int version_1)
{
int i;
NetClientState *nc;
n->mergeable_rx_bufs = mergeable_rx_bufs;
n->guest_hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
if (version_1) {
n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
n->guest_hdr_len = n->mergeable_rx_bufs ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
sizeof(struct virtio_net_hdr);
}
for (i = 0; i < n->max_queues; i++) {
nc = qemu_get_subqueue(n->nic, i);
......@@ -463,6 +470,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features)
}
if (!get_vhost_net(nc->peer)) {
virtio_add_feature(&features, VIRTIO_F_VERSION_1);
return features;
}
return vhost_net_get_features(get_vhost_net(nc->peer), features);
......@@ -521,7 +529,9 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
virtio_net_set_mrg_rx_bufs(n,
__virtio_has_feature(features,
VIRTIO_NET_F_MRG_RXBUF));
VIRTIO_NET_F_MRG_RXBUF),
__virtio_has_feature(features,
VIRTIO_F_VERSION_1));
if (n->has_vnet_hdr) {
n->curr_guest_offloads =
......@@ -1374,7 +1384,8 @@ static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
qemu_get_buffer(f, n->mac, ETH_ALEN);
n->vqs[0].tx_waiting = qemu_get_be32(f);
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));
virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
virtio_has_feature(vdev, VIRTIO_F_VERSION_1));
if (version_id >= 3)
n->status = qemu_get_be16(f);
......@@ -1626,7 +1637,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->vqs[0].tx_waiting = 0;
n->tx_burst = n->net_conf.txburst;
virtio_net_set_mrg_rx_bufs(n, 0);
virtio_net_set_mrg_rx_bufs(n, 0, 0);
n->promisc = 1; /* for compatibility */
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
......@@ -1696,10 +1707,50 @@ static void virtio_net_instance_init(Object *obj)
}
static Property virtio_net_properties[] = {
DEFINE_VIRTIO_NET_FEATURES(VirtIONet, host_features),
DEFINE_PROP_BIT("any_layout", VirtIONet, host_features,
VIRTIO_F_ANY_LAYOUT, true),
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_CSUM, true),
DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_TSO4, true),
DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_TSO6, true),
DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_ECN, true),
DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_UFO, true),
DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
VIRTIO_NET_F_GUEST_ANNOUNCE, true),
DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
VIRTIO_NET_F_HOST_TSO4, true),
DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
VIRTIO_NET_F_HOST_TSO6, true),
DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
VIRTIO_NET_F_HOST_ECN, true),
DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
VIRTIO_NET_F_HOST_UFO, true),
DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
VIRTIO_NET_F_MRG_RXBUF, true),
DEFINE_PROP_BIT("status", VirtIONet, host_features,
VIRTIO_NET_F_STATUS, true),
DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_VQ, true),
DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_RX, true),
DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_VLAN, true),
DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_RX_EXTRA, true),
DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_MAC_ADDR, true),
DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
TX_TIMER_INTERVAL),
TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
DEFINE_PROP_END_OF_LIST(),
......
......@@ -498,15 +498,19 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_ccw_stop_ioeventfd(dev);
}
virtio_set_status(vdev, status);
if (vdev->status == 0) {
virtio_reset(vdev);
}
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
virtio_ccw_start_ioeventfd(dev);
if (virtio_set_status(vdev, status) == 0) {
if (vdev->status == 0) {
virtio_reset(vdev);
}
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
virtio_ccw_start_ioeventfd(dev);
}
sch->curr_status.scsw.count = ccw.count - sizeof(status);
ret = 0;
} else {
/* Trigger a command reject. */
ret = -ENOSYS;
}
sch->curr_status.scsw.count = ccw.count - sizeof(status);
ret = 0;
}
break;
case CCW_CMD_SET_IND:
......
......@@ -294,7 +294,14 @@ static char *vhost_scsi_get_fw_dev_path(FWPathProvider *p, BusState *bus,
}
static Property vhost_scsi_properties[] = {
DEFINE_VHOST_SCSI_PROPERTIES(VHostSCSI, parent_obj.conf),
DEFINE_PROP_STRING("vhostfd", VHostSCSI, parent_obj.conf.vhostfd),
DEFINE_PROP_STRING("wwpn", VHostSCSI, parent_obj.conf.wwpn),
DEFINE_PROP_UINT32("boot_tpgt", VHostSCSI, parent_obj.conf.boot_tpgt, 0),
DEFINE_PROP_UINT32("num_queues", VHostSCSI, parent_obj.conf.num_queues, 1),
DEFINE_PROP_UINT32("max_sectors", VHostSCSI, parent_obj.conf.max_sectors,
0xFFFF),
DEFINE_PROP_UINT32("cmd_per_lun", VHostSCSI, parent_obj.conf.cmd_per_lun,
128),
DEFINE_PROP_END_OF_LIST(),
};
......
......@@ -948,8 +948,17 @@ static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
}
static Property virtio_scsi_properties[] = {
DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSI, parent_obj.conf),
DEFINE_VIRTIO_SCSI_FEATURES(VirtIOSCSI, host_features),
DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, 1),
DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
0xFFFF),
DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
128),
DEFINE_PROP_BIT("any_layout", VirtIOSCSI, host_features,
VIRTIO_F_ANY_LAYOUT, true),
DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
VIRTIO_SCSI_F_HOTPLUG, true),
DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
VIRTIO_SCSI_F_CHANGE, true),
DEFINE_PROP_END_OF_LIST(),
};
......
......@@ -157,15 +157,18 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
}
static int get_desc(Vring *vring, VirtQueueElement *elem,
static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
int is_write = virtio_tswap16(vdev, desc->flags) & VRING_DESC_F_WRITE;
uint32_t len = virtio_tswap32(vdev, desc->len);
uint64_t desc_addr = virtio_tswap64(vdev, desc->addr);
if (desc->flags & VRING_DESC_F_WRITE) {
if (is_write) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
......@@ -189,18 +192,17 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
}
/* TODO handle non-contiguous memory across region boundaries */
iov->iov_base = vring_map(&mr, desc->addr, desc->len,
desc->flags & VRING_DESC_F_WRITE);
iov->iov_base = vring_map(&mr, desc_addr, len, is_write);
if (!iov->iov_base) {
error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
(uint64_t)desc->addr, desc->len);
(uint64_t)desc_addr, len);
return -EFAULT;
}
/* The MemoryRegion is looked up again and unref'ed later, leave the
* ref in place. */
iov->iov_len = desc->len;
*addr = desc->addr;
iov->iov_len = len;
*addr = desc_addr;
*num += 1;
return 0;
}
......@@ -222,21 +224,23 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
uint32_t len = virtio_tswap32(vdev, indirect->len);
uint64_t addr = virtio_tswap64(vdev, indirect->addr);
/* Sanity check */
if (unlikely(indirect->len % sizeof(desc))) {
if (unlikely(len % sizeof(desc))) {
error_report("Invalid length in indirect descriptor: "
"len %#x not multiple of %#zx",
indirect->len, sizeof(desc));
len, sizeof(desc));
vring->broken = true;
return -EFAULT;
}
count = indirect->len / sizeof(desc);
count = len / sizeof(desc);
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
error_report("Indirect buffer length too big: %d", indirect->len);
error_report("Indirect buffer length too big: %d", len);
vring->broken = true;
return -EFAULT;
}
......@@ -247,12 +251,12 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
/* Translate indirect descriptor */
desc_ptr = vring_map(&mr,
indirect->addr + found * sizeof(desc),
addr + found * sizeof(desc),
sizeof(desc), false);
if (!desc_ptr) {
error_report("Failed to map indirect descriptor "
"addr %#" PRIx64 " len %zu",
(uint64_t)indirect->addr + found * sizeof(desc),
(uint64_t)addr + found * sizeof(desc),
sizeof(desc));
vring->broken = true;
return -EFAULT;
......@@ -270,19 +274,20 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
return -EFAULT;
}
if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
if (unlikely(virtio_tswap16(vdev, desc.flags)
& VRING_DESC_F_INDIRECT)) {
error_report("Nested indirect descriptor");
vring->broken = true;
return -EFAULT;
}
ret = get_desc(vring, elem, &desc);
ret = get_desc(vdev, vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
}
i = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
i = virtio_tswap16(vdev, desc.next);
} while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
return 0;
}
......@@ -383,7 +388,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Ensure descriptor is loaded before accessing fields */
barrier();
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_INDIRECT) {
ret = get_indirect(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
......@@ -391,13 +396,13 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
continue;
}
ret = get_desc(vring, elem, &desc);
ret = get_desc(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
}
i = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
i = virtio_tswap16(vdev, desc.next);
} while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
/* On success, increment avail index. */
vring->last_avail_idx++;
......
......@@ -591,7 +591,7 @@ static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
uint64_t features = dev->acked_features;
int r;
if (enable_log) {
features |= 0x1 << VHOST_F_LOG_ALL;
features |= 0x1ULL << VHOST_F_LOG_ALL;
}
r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
return r < 0 ? -errno : 0;
......@@ -902,7 +902,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
.priority = 10
};
hdev->migration_blocker = NULL;
if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
migrate_add_blocker(hdev->migration_blocker);
......@@ -1045,12 +1045,12 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
assert(r >= 0);
}
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
unsigned features)
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features)
{
const int *bit = feature_bits;
while (*bit != VHOST_INVALID_FEATURE_BIT) {
unsigned bit_mask = (1 << *bit);
uint64_t bit_mask = (1ULL << *bit);
if (!(hdev->features & bit_mask)) {
features &= ~bit_mask;
}
......@@ -1060,11 +1060,11 @@ unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
}
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
unsigned features)
uint64_t features)
{
const int *bit = feature_bits;
while (*bit != VHOST_INVALID_FEATURE_BIT) {
unsigned bit_mask = (1 << *bit);
uint64_t bit_mask = (1ULL << *bit);
if (features & bit_mask) {
hdev->acked_features |= bit_mask;
}
......@@ -1114,9 +1114,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
return 0;
fail_log:
if (hdev->log_size) {
vhost_log_put(hdev, false);
}
vhost_log_put(hdev, false);
fail_vq:
while (--i >= 0) {
vhost_virtqueue_stop(hdev,
......
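The vhost and vhost_net changes above widen the feature masks from `unsigned` to `uint64_t` and switch the shift literals to `1ULL`. The reason is that virtio 1 feature bits extend past position 31 (VIRTIO_F_VERSION_1 itself is bit 32), and shifting a plain `int` by 32 is undefined behaviour in C. A small self-contained illustration (not QEMU code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* VIRTIO_F_VERSION_1 is feature bit 32 in the virtio 1.0 spec. */
#define VIRTIO_F_VERSION_1 32

int main(void)
{
    uint64_t features = 0;

    /* 64-bit-safe form used throughout the patches above. */
    features |= 1ULL << VIRTIO_F_VERSION_1;
    printf("1ULL << 32 -> %#" PRIx64 "\n", features);

    /* The old 32-bit form: shifting a plain int by 32 is undefined
     * behaviour in C; on common compilers it yields 0 or 1, never bit 32.
     * (Left commented out so the sketch itself stays well defined.)
     *
     * features = 1 << VIRTIO_F_VERSION_1;
     */
    return 0;
}
```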
......@@ -312,7 +312,7 @@ static void virtio_balloon_set_config(VirtIODevice *vdev,
static uint64_t virtio_balloon_get_features(VirtIODevice *vdev, uint64_t f)
{
f |= (1 << VIRTIO_BALLOON_F_STATS_VQ);
virtio_add_feature(&f, VIRTIO_BALLOON_F_STATS_VQ);
return f;
}
......
......@@ -333,8 +333,11 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
case VIRTIO_MMIO_QUEUENUM:
DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
virtio_queue_set_num(vdev, vdev->queue_sel, value);
/* Note: only call this function for legacy devices */
virtio_queue_update_rings(vdev, vdev->queue_sel);
break;
case VIRTIO_MMIO_QUEUEALIGN:
/* Note: this is only valid for legacy devices */
virtio_queue_set_align(vdev, vdev->queue_sel, value);
break;
case VIRTIO_MMIO_QUEUEPFN:
......
This diff has been collapsed.
......@@ -24,6 +24,7 @@
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-9p.h"
#include "hw/virtio/virtio-input.h"
#ifdef CONFIG_VIRTFS
#include "hw/9pfs/virtio-9p.h"
#endif
......@@ -39,6 +40,8 @@ typedef struct VirtIOSerialPCI VirtIOSerialPCI;
typedef struct VirtIONetPCI VirtIONetPCI;
typedef struct VHostSCSIPCI VHostSCSIPCI;
typedef struct VirtIORngPCI VirtIORngPCI;
typedef struct VirtIOInputPCI VirtIOInputPCI;
typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
/* virtio-pci-bus */
......@@ -63,6 +66,12 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
/* virtio version flags */
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
#define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
typedef struct {
MSIMessage msg;
int virq;
......@@ -85,12 +94,38 @@ typedef struct VirtioPCIClass {
void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
} VirtioPCIClass;
typedef struct VirtIOPCIRegion {
MemoryRegion mr;
uint32_t offset;
uint32_t size;
uint32_t type;
} VirtIOPCIRegion;
struct VirtIOPCIProxy {
PCIDevice pci_dev;
MemoryRegion bar;
VirtIOPCIRegion common;
VirtIOPCIRegion isr;
VirtIOPCIRegion device;
VirtIOPCIRegion notify;
MemoryRegion modern_bar;
uint32_t legacy_io_bar;
uint32_t msix_bar;
uint32_t modern_mem_bar;
uint32_t flags;
uint32_t class_code;
uint32_t nvectors;
uint32_t dfselect;
uint32_t gfselect;
uint32_t guest_features[2];
struct {
uint16_t num;
bool enabled;
uint32_t desc[2];
uint32_t avail[2];
uint32_t used[2];
} vqs[VIRTIO_QUEUE_MAX];
bool ioeventfd_disabled;
bool ioeventfd_started;
VirtIOIRQFD *vector_irqfd;
......@@ -202,6 +237,30 @@ struct VirtIORngPCI {
VirtIORNG vdev;
};
/*
* virtio-input-pci: This extends VirtioPCIProxy.
*/
#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci"
#define VIRTIO_INPUT_PCI(obj) \
OBJECT_CHECK(VirtIOInputPCI, (obj), TYPE_VIRTIO_INPUT_PCI)
struct VirtIOInputPCI {
VirtIOPCIProxy parent_obj;
VirtIOInput vdev;
};
#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci"
#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci"
#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci"
#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci"
#define VIRTIO_INPUT_HID_PCI(obj) \
OBJECT_CHECK(VirtIOInputHIDPCI, (obj), TYPE_VIRTIO_INPUT_HID_PCI)
struct VirtIOInputHIDPCI {
VirtIOPCIProxy parent_obj;
VirtIOInputHID vdev;
};
/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0
......
......@@ -219,7 +219,13 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
}
static Property virtio_rng_properties[] = {
DEFINE_VIRTIO_RNG_PROPERTIES(VirtIORNG, conf),
/* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
* you have an entropy source capable of generating more entropy than this
* and you can pass it through via virtio-rng, then hats off to you. Until
* then, this is unlimited for all practical purposes.
*/
DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX),
DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16),
DEFINE_PROP_END_OF_LIST(),
};
......
......@@ -69,7 +69,6 @@ typedef struct VRing
struct VirtQueue
{
VRing vring;
hwaddr pa;
uint16_t last_avail_idx;
/* Last used index value we have signalled on */
uint16_t signalled_used;
......@@ -93,15 +92,18 @@ struct VirtQueue
};
/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
hwaddr pa = vq->pa;
VRing *vring = &vdev->vq[n].vring;
vq->vring.desc = pa;
vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
vq->vring.used = vring_align(vq->vring.avail +
offsetof(VRingAvail, ring[vq->vring.num]),
vq->vring.align);
if (!vring->desc) {
/* not yet setup -> nothing to do */
return;
}
vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
vring->used = vring_align(vring->avail +
offsetof(VRingAvail, ring[vring->num]),
vring->align);
}
static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
......@@ -542,15 +544,37 @@ void virtio_update_irq(VirtIODevice *vdev)
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
void virtio_set_status(VirtIODevice *vdev, uint8_t val)
static int virtio_validate_features(VirtIODevice *vdev)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->validate_features) {
return k->validate_features(vdev);
} else {
return 0;
}
}
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
val & VIRTIO_CONFIG_S_FEATURES_OK) {
int ret = virtio_validate_features(vdev);
if (ret) {
return ret;
}
}
}
if (k->set_status) {
k->set_status(vdev, val);
}
vdev->status = val;
return 0;
}
bool target_words_bigendian(void);
......@@ -605,7 +629,6 @@ void virtio_reset(void *opaque)
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].pa = 0;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
......@@ -706,15 +729,119 @@ void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
}
}
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint8_t val;
if (addr + sizeof(val) > vdev->config_len) {
return (uint32_t)-1;
}
k->get_config(vdev, vdev->config);
val = ldub_p(vdev->config + addr);
return val;
}
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint16_t val;
if (addr + sizeof(val) > vdev->config_len) {
return (uint32_t)-1;
}
k->get_config(vdev, vdev->config);
val = lduw_le_p(vdev->config + addr);
return val;
}
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint32_t val;
if (addr + sizeof(val) > vdev->config_len) {
return (uint32_t)-1;
}
k->get_config(vdev, vdev->config);
val = ldl_le_p(vdev->config + addr);
return val;
}
void virtio_config_modern_writeb(VirtIODevice *vdev,
uint32_t addr, uint32_t data)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint8_t val = data;
if (addr + sizeof(val) > vdev->config_len) {
return;
}
stb_p(vdev->config + addr, val);
if (k->set_config) {
k->set_config(vdev, vdev->config);
}
}
void virtio_config_modern_writew(VirtIODevice *vdev,
uint32_t addr, uint32_t data)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint16_t val = data;
if (addr + sizeof(val) > vdev->config_len) {
return;
}
stw_le_p(vdev->config + addr, val);
if (k->set_config) {
k->set_config(vdev, vdev->config);
}
}
void virtio_config_modern_writel(VirtIODevice *vdev,
uint32_t addr, uint32_t data)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint32_t val = data;
if (addr + sizeof(val) > vdev->config_len) {
return;
}
stl_le_p(vdev->config + addr, val);
if (k->set_config) {
k->set_config(vdev, vdev->config);
}
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
vdev->vq[n].pa = addr;
virtqueue_init(&vdev->vq[n]);
vdev->vq[n].vring.desc = addr;
virtio_queue_update_rings(vdev, n);
}
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].pa;
return vdev->vq[n].vring.desc;
}
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used)
{
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
......@@ -728,7 +855,6 @@ void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
return;
}
vdev->vq[n].vring.num = num;
virtqueue_init(&vdev->vq[n]);
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
......@@ -771,6 +897,11 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
/* virtio-1 compliant devices cannot change the alignment */
if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
error_report("tried to modify queue alignment for virtio-1 device");
return;
}
/* Check that the transport told us it was going to do this
* (so a buggy transport will immediately assert rather than
* silently failing to migrate this state)
......@@ -778,7 +909,7 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
assert(k->has_variable_vring_alignment);
vdev->vq[n].vring.align = align;
virtqueue_init(&vdev->vq[n]);
virtio_queue_update_rings(vdev, n);
}
void virtio_queue_notify_vq(VirtQueue *vq)
......@@ -895,6 +1026,7 @@ void virtio_notify_config(VirtIODevice *vdev)
return;
vdev->isr |= 0x03;
vdev->generation++;
virtio_notify_vector(vdev, vdev->config_vector);
}
......@@ -903,7 +1035,11 @@ static bool virtio_device_endian_needed(void *opaque)
VirtIODevice *vdev = opaque;
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
return vdev->device_endian != virtio_default_endian();
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return vdev->device_endian != virtio_default_endian();
}
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}
static bool virtio_64bit_features_needed(void *opaque)
......@@ -988,7 +1124,8 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
if (k->has_variable_vring_alignment) {
qemu_put_be32(f, vdev->vq[i].vring.align);
}
qemu_put_be64(f, vdev->vq[i].pa);
/* XXX virtio-1 devices */
qemu_put_be64(f, vdev->vq[i].vring.desc);
qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
if (k->save_queue) {
k->save_queue(qbus->parent, i, f);
......@@ -1003,7 +1140,7 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
bool bad = (val & ~(vdev->host_features)) != 0;
......@@ -1016,6 +1153,18 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return bad ? -1 : 0;
}
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
/*
* The driver must not attempt to set features after feature negotiation
* has finished.
*/
if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
return -EINVAL;
}
return virtio_set_features_nocheck(vdev, val);
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
int i, ret;
......@@ -1072,13 +1221,14 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
vdev->vq[i].pa = qemu_get_be64(f);
vdev->vq[i].vring.desc = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
if (vdev->vq[i].pa) {
virtqueue_init(&vdev->vq[i]);
if (vdev->vq[i].vring.desc) {
/* XXX virtio-1 devices */
virtio_queue_update_rings(vdev, i);
} else if (vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
......@@ -1118,14 +1268,14 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
* host_features.
*/
uint64_t features64 = vdev->guest_features;
if (virtio_set_features(vdev, features64) < 0) {
if (virtio_set_features_nocheck(vdev, features64) < 0) {
error_report("Features 0x%" PRIx64 " unsupported. "
"Allowed features: 0x%" PRIx64,
features64, vdev->host_features);
return -1;
}
} else {
if (virtio_set_features(vdev, features) < 0) {
if (virtio_set_features_nocheck(vdev, features) < 0) {
error_report("Features 0x%x unsupported. "
"Allowed features: 0x%" PRIx64,
features, vdev->host_features);
......@@ -1134,7 +1284,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
for (i = 0; i < num; i++) {
if (vdev->vq[i].pa) {
if (vdev->vq[i].vring.desc) {
uint16_t nheads;
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing strange things with descriptor numbers. */
......
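virtio_queue_update_rings() above recomputes the legacy split-ring layout purely from the descriptor-table address: the avail ring follows the descriptor table and the used ring starts at the next alignment boundary. A standalone sketch of that arithmetic, using the element sizes fixed by the virtio spec and the legacy 4096-byte alignment (the sample guest address is made up):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Legacy (pre-virtio-1) split-ring layout, as recomputed by
 * virtio_queue_update_rings(): avail follows the descriptor table,
 * used starts at the next 'align'-byte boundary after the avail ring.
 * Element sizes follow the virtio spec: 16-byte descriptors, 2-byte
 * avail ring entries, and a 4-byte flags/idx header. */
static void vring_layout(uint64_t desc, unsigned num, unsigned align)
{
    uint64_t avail = desc + (uint64_t)num * 16;
    uint64_t avail_end = avail + 4 + 2ULL * num;   /* flags, idx, ring[num] */
    uint64_t used = (avail_end + align - 1) & ~(uint64_t)(align - 1);

    printf("num=%u desc=%#" PRIx64 " avail=%#" PRIx64 " used=%#" PRIx64 "\n",
           num, desc, avail, used);
}

int main(void)
{
    /* VIRTIO_PCI_VRING_ALIGN is 4096 for the legacy PCI transport. */
    vring_layout(0x100000, 256, 4096);
    return 0;
}
```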
......@@ -47,6 +47,13 @@
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
#define PCI_CLASS_INPUT_KEYBOARD 0x0900
#define PCI_CLASS_INPUT_PEN 0x0901
#define PCI_CLASS_INPUT_MOUSE 0x0902
#define PCI_CLASS_INPUT_SCANNER 0x0903
#define PCI_CLASS_INPUT_GAMEPORT 0x0904
#define PCI_CLASS_INPUT_OTHER 0x0980
#define PCI_CLASS_PROCESSOR_CO 0x0b40
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
......
......@@ -66,13 +66,4 @@ typedef struct VHostSCSI {
int lun;
} VHostSCSI;
#define DEFINE_VHOST_SCSI_PROPERTIES(_state, _conf_field) \
DEFINE_PROP_STRING("vhostfd", _state, _conf_field.vhostfd), \
DEFINE_PROP_STRING("wwpn", _state, _conf_field.wwpn), \
DEFINE_PROP_UINT32("boot_tpgt", _state, _conf_field.boot_tpgt, 0), \
DEFINE_PROP_UINT32("num_queues", _state, _conf_field.num_queues, 1), \
DEFINE_PROP_UINT32("max_sectors", _state, _conf_field.max_sectors, 0xFFFF), \
DEFINE_PROP_UINT32("cmd_per_lun", _state, _conf_field.cmd_per_lun, 128)
#endif
......@@ -78,8 +78,8 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
*/
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
bool mask);
unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
unsigned features);
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features);
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
unsigned features);
uint64_t features);
#endif
......@@ -19,6 +19,10 @@
static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
{
if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
}
#if defined(TARGET_IS_BIENDIAN)
return virtio_is_big_endian(vdev);
#elif defined(TARGET_WORDS_BIGENDIAN)
......
......@@ -25,6 +25,12 @@
typedef struct virtio_balloon_stat VirtIOBalloonStat;
typedef struct virtio_balloon_stat_modern {
uint16_t tag;
uint8_t reserved[6];
uint64_t val;
} VirtIOBalloonStatModern;
typedef struct VirtIOBalloon {
VirtIODevice parent_obj;
VirtQueue *ivq, *dvq, *svq;
......
......@@ -107,36 +107,7 @@ typedef struct VirtIONet {
* VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
*/
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
#define DEFINE_VIRTIO_NET_FEATURES(_state, _field) \
DEFINE_PROP_BIT("any_layout", _state, _field, VIRTIO_F_ANY_LAYOUT, true), \
DEFINE_PROP_BIT("csum", _state, _field, VIRTIO_NET_F_CSUM, true), \
DEFINE_PROP_BIT("guest_csum", _state, _field, VIRTIO_NET_F_GUEST_CSUM, true), \
DEFINE_PROP_BIT("gso", _state, _field, VIRTIO_NET_F_GSO, true), \
DEFINE_PROP_BIT("guest_tso4", _state, _field, VIRTIO_NET_F_GUEST_TSO4, true), \
DEFINE_PROP_BIT("guest_tso6", _state, _field, VIRTIO_NET_F_GUEST_TSO6, true), \
DEFINE_PROP_BIT("guest_ecn", _state, _field, VIRTIO_NET_F_GUEST_ECN, true), \
DEFINE_PROP_BIT("guest_ufo", _state, _field, VIRTIO_NET_F_GUEST_UFO, true), \
DEFINE_PROP_BIT("guest_announce", _state, _field, VIRTIO_NET_F_GUEST_ANNOUNCE, true), \
DEFINE_PROP_BIT("host_tso4", _state, _field, VIRTIO_NET_F_HOST_TSO4, true), \
DEFINE_PROP_BIT("host_tso6", _state, _field, VIRTIO_NET_F_HOST_TSO6, true), \
DEFINE_PROP_BIT("host_ecn", _state, _field, VIRTIO_NET_F_HOST_ECN, true), \
DEFINE_PROP_BIT("host_ufo", _state, _field, VIRTIO_NET_F_HOST_UFO, true), \
DEFINE_PROP_BIT("mrg_rxbuf", _state, _field, VIRTIO_NET_F_MRG_RXBUF, true), \
DEFINE_PROP_BIT("status", _state, _field, VIRTIO_NET_F_STATUS, true), \
DEFINE_PROP_BIT("ctrl_vq", _state, _field, VIRTIO_NET_F_CTRL_VQ, true), \
DEFINE_PROP_BIT("ctrl_rx", _state, _field, VIRTIO_NET_F_CTRL_RX, true), \
DEFINE_PROP_BIT("ctrl_vlan", _state, _field, VIRTIO_NET_F_CTRL_VLAN, true), \
DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field, VIRTIO_NET_F_CTRL_RX_EXTRA, true), \
DEFINE_PROP_BIT("ctrl_mac_addr", _state, _field, VIRTIO_NET_F_CTRL_MAC_ADDR, true), \
DEFINE_PROP_BIT("ctrl_guest_offloads", _state, _field, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), \
DEFINE_PROP_BIT("mq", _state, _field, VIRTIO_NET_F_MQ, false)
#define DEFINE_VIRTIO_NET_PROPERTIES(_state, _field) \
DEFINE_PROP_UINT32("x-txtimer", _state, _field.txtimer, TX_TIMER_INTERVAL),\
DEFINE_PROP_INT32("x-txburst", _state, _field.txburst, TX_BURST), \
DEFINE_PROP_STRING("tx", _state, _field.tx)
#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
const char *type);
......
......@@ -46,14 +46,4 @@ typedef struct VirtIORNG {
int64_t quota_remaining;
} VirtIORNG;
/* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
you have an entropy source capable of generating more entropy than this
and you can pass it through via virtio-rng, then hats off to you. Until
then, this is unlimited for all practical purposes.
*/
#define DEFINE_VIRTIO_RNG_PROPERTIES(_state, _conf_field) \
DEFINE_PROP_UINT64("max-bytes", _state, _conf_field.max_bytes, \
INT64_MAX), \
DEFINE_PROP_UINT32("period", _state, _conf_field.period_ms, 1 << 16)
#endif
......@@ -141,19 +141,6 @@ typedef struct VirtIOSCSIReq {
} req;
} VirtIOSCSIReq;
#define DEFINE_VIRTIO_SCSI_PROPERTIES(_state, _conf_field) \
DEFINE_PROP_UINT32("num_queues", _state, _conf_field.num_queues, 1), \
DEFINE_PROP_UINT32("max_sectors", _state, _conf_field.max_sectors, 0xFFFF),\
DEFINE_PROP_UINT32("cmd_per_lun", _state, _conf_field.cmd_per_lun, 128)
#define DEFINE_VIRTIO_SCSI_FEATURES(_state, _feature_field) \
DEFINE_PROP_BIT("any_layout", _state, _feature_field, \
VIRTIO_F_ANY_LAYOUT, true), \
DEFINE_PROP_BIT("hotplug", _state, _feature_field, VIRTIO_SCSI_F_HOTPLUG, \
true), \
DEFINE_PROP_BIT("param_change", _state, _feature_field, \
VIRTIO_SCSI_F_CHANGE, true)
typedef void (*HandleOutput)(VirtIODevice *, VirtQueue *);
void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
......
......@@ -221,7 +221,4 @@ void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle);
#define VIRTIO_SERIAL(obj) \
OBJECT_CHECK(VirtIOSerial, (obj), TYPE_VIRTIO_SERIAL)
#define DEFINE_VIRTIO_SERIAL_PROPERTIES(_state, _field) \
DEFINE_PROP_UINT32("max_ports", _state, _field.max_virtserial_ports, 31)
#endif
......@@ -78,6 +78,7 @@ struct VirtIODevice
size_t config_len;
void *config;
uint16_t config_vector;
uint32_t generation;
int nvectors;
VirtQueue *vq;
uint16_t device_id;
......@@ -99,6 +100,7 @@ typedef struct VirtioDeviceClass {
uint64_t (*get_features)(VirtIODevice *vdev, uint64_t requested_features);
uint64_t (*bad_features)(VirtIODevice *vdev);
void (*set_features)(VirtIODevice *vdev, uint64_t val);
int (*validate_features)(VirtIODevice *vdev);
void (*get_config)(VirtIODevice *vdev, uint8_t *config);
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
......@@ -172,16 +174,28 @@ uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr);
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data);
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr);
void virtio_config_modern_writeb(VirtIODevice *vdev,
uint32_t addr, uint32_t data);
void virtio_config_modern_writew(VirtIODevice *vdev,
uint32_t addr, uint32_t data);
void virtio_config_modern_writel(VirtIODevice *vdev,
uint32_t addr, uint32_t data);
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr);
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n);
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num);
int virtio_queue_get_num(VirtIODevice *vdev, int n);
int virtio_get_num_queues(VirtIODevice *vdev);
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used);
void virtio_queue_update_rings(VirtIODevice *vdev, int n);
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align);
void virtio_queue_notify(VirtIODevice *vdev, int n);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
void virtio_set_status(VirtIODevice *vdev, uint8_t val);
int virtio_set_status(VirtIODevice *vdev, uint8_t val);
void virtio_reset(void *opaque);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint64_t val);
......@@ -252,7 +266,11 @@ static inline bool virtio_has_feature(VirtIODevice *vdev, unsigned int fbit)
static inline bool virtio_is_big_endian(VirtIODevice *vdev)
{
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
}
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return false;
}
#endif
......@@ -22,8 +22,8 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
void vhost_net_cleanup(VHostNetState *net);
unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
void vhost_net_ack_features(VHostNetState *net, unsigned features);
uint64_t vhost_net_get_features(VHostNetState *net, uint64_t features);
void vhost_net_ack_features(VHostNetState *net, uint64_t features);
bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
......
/*
* Virtio PCI driver
*
* This module allows virtio devices to be used over a virtual PCI device.
* This can be used with QEMU based VMMs like KVM or Xen.
*
* Copyright IBM Corp. 2007
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _LINUX_VIRTIO_PCI_H
#define _LINUX_VIRTIO_PCI_H
#include <linux/types.h>
#ifndef VIRTIO_PCI_NO_LEGACY
/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0
/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES 4
/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN 8
/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM 12
/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL 14
/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY 16
/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS 18
/* An 8-bit r/o interrupt status register. Reading the value will return the
* current contents of the ISR and will also clear it. This is effectively
* a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
/* The remaining space is defined by each driver as the per-driver
* configuration space */
#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
/* How many bits to shift physical queue address written to QUEUE_PFN.
* 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
/* The alignment to use between consumer and producer parts of vring.
* x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096
#endif /* VIRTIO_PCI_NO_LEGACY */
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff
#ifndef VIRTIO_PCI_NO_MODERN
/* IDs for different capabilities. Must all exist. */
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
/* Notifications */
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
/* ISR access */
#define VIRTIO_PCI_CAP_ISR_CFG 3
/* Device specific configuration */
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* This is the PCI capability header: */
struct virtio_pci_cap {
__u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
__u8 cap_next; /* Generic PCI field: next ptr. */
__u8 cap_len; /* Generic PCI field: capability length */
__u8 cfg_type; /* Identifies the structure. */
__u8 bar; /* Where to find it. */
__u8 padding[3]; /* Pad to full dword. */
__le32 offset; /* Offset within bar. */
__le32 length; /* Length of the structure, in bytes. */
};
struct virtio_pci_notify_cap {
struct virtio_pci_cap cap;
__le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
};
/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
/* About the whole device. */
__le32 device_feature_select; /* read-write */
__le32 device_feature; /* read-only */
__le32 guest_feature_select; /* read-write */
__le32 guest_feature; /* read-write */
__le16 msix_config; /* read-write */
__le16 num_queues; /* read-only */
__u8 device_status; /* read-write */
__u8 config_generation; /* read-only */
/* About a specific virtqueue. */
__le16 queue_select; /* read-write */
__le16 queue_size; /* read-write, power of 2. */
__le16 queue_msix_vector; /* read-write */
__le16 queue_enable; /* read-write */
__le16 queue_notify_off; /* read-only */
__le32 queue_desc_lo; /* read-write */
__le32 queue_desc_hi; /* read-write */
__le32 queue_avail_lo; /* read-write */
__le32 queue_avail_hi; /* read-write */
__le32 queue_used_lo; /* read-write */
__le32 queue_used_hi; /* read-write */
};
/* Macro versions of offsets for the Old Timers! */
#define VIRTIO_PCI_CAP_VNDR 0
#define VIRTIO_PCI_CAP_NEXT 1
#define VIRTIO_PCI_CAP_LEN 2
#define VIRTIO_PCI_CAP_CFG_TYPE 3
#define VIRTIO_PCI_CAP_BAR 4
#define VIRTIO_PCI_CAP_OFFSET 8
#define VIRTIO_PCI_CAP_LENGTH 12
#define VIRTIO_PCI_NOTIFY_CAP_MULT 16
#define VIRTIO_PCI_COMMON_DFSELECT 0
#define VIRTIO_PCI_COMMON_DF 4
#define VIRTIO_PCI_COMMON_GFSELECT 8
#define VIRTIO_PCI_COMMON_GF 12
#define VIRTIO_PCI_COMMON_MSIX 16
#define VIRTIO_PCI_COMMON_NUMQ 18
#define VIRTIO_PCI_COMMON_STATUS 20
#define VIRTIO_PCI_COMMON_CFGGENERATION 21
#define VIRTIO_PCI_COMMON_Q_SELECT 22
#define VIRTIO_PCI_COMMON_Q_SIZE 24
#define VIRTIO_PCI_COMMON_Q_MSIX 26
#define VIRTIO_PCI_COMMON_Q_ENABLE 28
#define VIRTIO_PCI_COMMON_Q_NOFF 30
#define VIRTIO_PCI_COMMON_Q_DESCLO 32
#define VIRTIO_PCI_COMMON_Q_DESCHI 36
#define VIRTIO_PCI_COMMON_Q_AVAILLO 40
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
#endif /* VIRTIO_PCI_NO_MODERN */
#endif
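The common-config layout above exposes the 64-bit device feature word through a 32-bit select/value window (`device_feature_select` / `device_feature`, at the VIRTIO_PCI_COMMON_DFSELECT / VIRTIO_PCI_COMMON_DF offsets). A toy sketch of how a driver reads it back through that window; the in-memory "device" and its feature value here are invented purely for illustration:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Offsets into the common config region, matching the macros above. */
#define VIRTIO_PCI_COMMON_DFSELECT 0
#define VIRTIO_PCI_COMMON_DF       4

/* Toy stand-in for the BAR region that VIRTIO_PCI_CAP_COMMON_CFG points at:
 * a 64-bit feature word exposed through the 32-bit select/value window of
 * struct virtio_pci_common_cfg. The feature value is arbitrary. */
static uint64_t device_features = (1ULL << 32) | 0x00010020;
static uint32_t dfselect;

static void mmio_write32(uint32_t off, uint32_t val)
{
    if (off == VIRTIO_PCI_COMMON_DFSELECT) {
        dfselect = val;
    }
}

static uint32_t mmio_read32(uint32_t off)
{
    if (off == VIRTIO_PCI_COMMON_DF) {
        return (uint32_t)(device_features >> (dfselect ? 32 : 0));
    }
    return 0;
}

int main(void)
{
    uint64_t features;

    mmio_write32(VIRTIO_PCI_COMMON_DFSELECT, 0);                /* low dword  */
    features = mmio_read32(VIRTIO_PCI_COMMON_DF);
    mmio_write32(VIRTIO_PCI_COMMON_DFSELECT, 1);                /* high dword */
    features |= (uint64_t)mmio_read32(VIRTIO_PCI_COMMON_DF) << 32;

    printf("device features: %#" PRIx64 "\n", features);
    return 0;
}
```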