virtio-net.c 66.5 KB
Newer Older
A
aliguori 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

P
Peter Maydell 已提交
14
#include "qemu/osdep.h"
15
#include "qemu/iov.h"
P
Paolo Bonzini 已提交
16
#include "hw/virtio/virtio.h"
P
Paolo Bonzini 已提交
17
#include "net/net.h"
18
#include "net/checksum.h"
19
#include "net/tap.h"
20 21
#include "qemu/error-report.h"
#include "qemu/timer.h"
P
Paolo Bonzini 已提交
22 23
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
24
#include "hw/virtio/virtio-bus.h"
25
#include "qapi/qmp/qjson.h"
26
#include "qapi-event.h"
27
#include "hw/virtio/virtio-access.h"
28
#include "migration/misc.h"
A
aliguori 已提交
29

30
#define VIRTIO_NET_VM_VERSION    11
31

32
#define MAC_TABLE_ENTRIES    64
33
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
34

35 36
/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
37 38
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

39 40
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
41
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
42

43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
62 63
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
64 65 66
    {}
};

J
Jason Wang 已提交
67
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
68 69 70
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

J
Jason Wang 已提交
71
    return &n->vqs[nc->queue_index];
72
}
J
Jason Wang 已提交
73 74 75 76 77 78

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

A
aliguori 已提交
79 80 81 82
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

83
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
A
aliguori 已提交
84
{
85
    VirtIONet *n = VIRTIO_NET(vdev);
A
aliguori 已提交
86 87
    struct virtio_net_config netcfg;

88 89
    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
90
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
91
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
92
    memcpy(config, &netcfg, n->config_size);
A
aliguori 已提交
93 94
}

95 96
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
97
    VirtIONet *n = VIRTIO_NET(vdev);
98
    struct virtio_net_config netcfg = {};
99

100
    memcpy(&netcfg, config, n->config_size);
101

102 103
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
104
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
105
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
J
Jason Wang 已提交
106
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
107 108 109
    }
}

110 111
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
112
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
113
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
114
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
115 116
}

J
Jason Wang 已提交
117 118 119 120 121 122 123 124 125 126
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

127
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
128
{
129
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
J
Jason Wang 已提交
130
    NetClientState *nc = qemu_get_queue(n->nic);
J
Jason Wang 已提交
131
    int queues = n->multiqueue ? n->max_queues : 1;
J
Jason Wang 已提交
132

133
    if (!get_vhost_net(nc->peer)) {
134 135
        return;
    }
J
Jason Wang 已提交
136

137 138
    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
139 140 141
        return;
    }
    if (!n->vhost_started) {
142 143
        int r, i;

144 145 146 147 148 149 150
        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

151 152 153 154 155 156 157 158 159 160 161
        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0;  i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

162 163 164 165 166 167 168 169 170 171
        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

172
        n->vhost_started = 1;
173
        r = vhost_net_start(vdev, n->nic->ncs, queues);
174
        if (r < 0) {
175 176
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
177
            n->vhost_started = 0;
178 179
        }
    } else {
180
        vhost_net_stop(vdev, n->nic->ncs, queues);
181 182 183 184
        n->vhost_started = 0;
    }
}

185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

238 239 240 241 242 243 244 245
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

246 247
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
248
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
249 250 251
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;
252

253
    virtio_net_vnet_endian_status(n, status);
254 255
    virtio_net_vhost_status(n, status);

J
Jason Wang 已提交
256
    for (i = 0; i < n->max_queues; i++) {
257 258
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
J
Jason Wang 已提交
259
        q = &n->vqs[i];
260

J
Jason Wang 已提交
261 262
        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
263
        } else {
J
Jason Wang 已提交
264
            queue_status = status;
265
        }
266 267 268 269 270 271
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }
J
Jason Wang 已提交
272 273 274 275 276

        if (!q->tx_waiting) {
            continue;
        }

277
        if (queue_started) {
J
Jason Wang 已提交
278
            if (q->tx_timer) {
279 280
                timer_mod(q->tx_timer,
                               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
J
Jason Wang 已提交
281 282 283
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
284
        } else {
J
Jason Wang 已提交
285
            if (q->tx_timer) {
286
                timer_del(q->tx_timer);
J
Jason Wang 已提交
287 288 289
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
290 291 292 293 294 295 296 297
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* if tx is waiting we are likely have some packets in tx queue
                 * and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
298 299 300 301
        }
    }
}

302
static void virtio_net_set_link_status(NetClientState *nc)
303
{
J
Jason Wang 已提交
304
    VirtIONet *n = qemu_get_nic_opaque(nc);
305
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
306 307
    uint16_t old_status = n->status;

M
Mark McLoughlin 已提交
308
    if (nc->link_down)
309 310 311 312 313
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
314
        virtio_notify_config(vdev);
315

316
    virtio_net_set_status(vdev, vdev->status);
317 318
}

319 320 321 322 323
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
324
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
325 326
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path, &error_abort);
327
        g_free(path);
328 329 330 331 332 333

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

354 355 356
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
357
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
358 359
    RxFilterInfo *info;
    strList *str_list, *entry;
360
    int i;
361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

386
    info->main_mac = qemu_mac_strdup_printf(n->mac);
387 388 389 390

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
391
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
392 393 394 395 396 397 398 399
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
400
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
401 402 403 404
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
405
    info->vlan_table = get_vlan_table(n);
406

407
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
408 409 410 411 412
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
413 414 415 416 417 418 419 420
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

421 422
static void virtio_net_reset(VirtIODevice *vdev)
{
423
    VirtIONet *n = VIRTIO_NET(vdev);
424 425 426 427

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
428 429 430 431
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
J
Jason Wang 已提交
432 433
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
J
Jason Wang 已提交
434 435 436
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;
437

438
    /* Flush any MAC and VLAN filter table state */
439
    n->mac_table.in_use = 0;
440
    n->mac_table.first_multi = 0;
441 442
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
443
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
444
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
445
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
446
    memset(n->vlans, 0, MAX_VLAN >> 3);
447 448
}

449
static void peer_test_vnet_hdr(VirtIONet *n)
M
Mark McLoughlin 已提交
450
{
J
Jason Wang 已提交
451 452
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
453
        return;
J
Jason Wang 已提交
454
    }
M
Mark McLoughlin 已提交
455

456
    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
457
}
M
Mark McLoughlin 已提交
458

459 460
static int peer_has_vnet_hdr(VirtIONet *n)
{
M
Mark McLoughlin 已提交
461 462 463
    return n->has_vnet_hdr;
}

464 465 466 467 468
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

469
    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
470 471 472 473

    return n->has_ufo;
}

474 475
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
476
{
J
Jason Wang 已提交
477 478 479
    int i;
    NetClientState *nc;

480 481
    n->mergeable_rx_bufs = mergeable_rx_bufs;

482 483 484 485 486 487 488
    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }
489

J
Jason Wang 已提交
490 491 492 493
    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
494 495
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
J
Jason Wang 已提交
496 497
            n->host_hdr_len = n->guest_hdr_len;
        }
498 499 500
    }
}

J
Jason Wang 已提交
501 502 503 504 505 506 507 508
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

509
    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
510 511 512
        vhost_set_vring_enable(nc->peer, 1);
    }

513
    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
J
Jason Wang 已提交
514 515 516
        return 0;
    }

517 518 519 520
    if (n->max_queues == 1) {
        return 0;
    }

J
Jason Wang 已提交
521 522 523 524 525 526 527 528 529 530 531
    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

532
    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
533 534 535
        vhost_set_vring_enable(nc->peer, 0);
    }

536
    if (nc->peer->info->type !=  NET_CLIENT_DRIVER_TAP) {
J
Jason Wang 已提交
537 538 539 540 541 542 543 544 545
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
546
    int r;
J
Jason Wang 已提交
547

548 549 550 551
    if (n->nic->peer_deleted) {
        return;
    }

J
Jason Wang 已提交
552 553
    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
554 555
            r = peer_attach(n, i);
            assert(!r);
J
Jason Wang 已提交
556
        } else {
557 558
            r = peer_detach(n, i);
            assert(!r);
J
Jason Wang 已提交
559 560 561 562
        }
    }
}

J
Jason Wang 已提交
563
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
J
Jason Wang 已提交
564

J
Jason Wang 已提交
565 566
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
A
aliguori 已提交
567
{
568
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
569
    NetClientState *nc = qemu_get_queue(n->nic);
A
aliguori 已提交
570

571 572 573
    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

574
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
575

576
    if (!peer_has_vnet_hdr(n)) {
577 578 579 580
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
581

582 583 584 585
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
586
    }
M
Mark McLoughlin 已提交
587

588
    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
589 590
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
M
Mark McLoughlin 已提交
591 592
    }

593
    if (!get_vhost_net(nc->peer)) {
594 595
        return features;
    }
596 597 598 599 600 601 602 603 604
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
A
aliguori 已提交
605 606
}

G
Gerd Hoffmann 已提交
607
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
608
{
G
Gerd Hoffmann 已提交
609
    uint64_t features = 0;
610 611 612

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
613 614 615 616 617
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
618

619
    return features;
620 621
}

622 623
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
624
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

G
Gerd Hoffmann 已提交
650
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
A
aliguori 已提交
651
{
652
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
653 654
    int i;

655 656 657 658 659
    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

660
    virtio_net_set_multiqueue(n,
661
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));
A
aliguori 已提交
662

663
    virtio_net_set_mrg_rx_bufs(n,
664 665 666 667
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));
668 669

    if (n->has_vnet_hdr) {
670 671 672
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
673
    }
J
Jason Wang 已提交
674 675 676 677

    for (i = 0;  i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

678
        if (!get_vhost_net(nc->peer)) {
J
Jason Wang 已提交
679 680
            continue;
        }
681
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
D
David L Stevens 已提交
682
    }
683

684
    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
685 686 687 688
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
A
aliguori 已提交
689 690
}

691
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
692
                                     struct iovec *iov, unsigned int iov_cnt)
693 694
{
    uint8_t on;
695
    size_t s;
696
    NetClientState *nc = qemu_get_queue(n->nic);
697

698 699 700
    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
701 702
    }

A
Amos Kong 已提交
703
    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
704
        n->promisc = on;
A
Amos Kong 已提交
705
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
706
        n->allmulti = on;
A
Amos Kong 已提交
707
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
708
        n->alluni = on;
A
Amos Kong 已提交
709
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
710
        n->nomulti = on;
A
Amos Kong 已提交
711
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
712
        n->nouni = on;
A
Amos Kong 已提交
713
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
714
        n->nobcast = on;
715
    } else {
716
        return VIRTIO_NET_ERR;
717
    }
718

719 720
    rxfilter_notify(nc);

721 722 723
    return VIRTIO_NET_OK;
}

724 725 726 727 728 729 730
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

731
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

761
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
762
                                 struct iovec *iov, unsigned int iov_cnt)
763
{
764
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
765
    struct virtio_net_ctrl_mac mac_data;
766
    size_t s;
767
    NetClientState *nc = qemu_get_queue(n->nic);
768

769 770 771 772 773 774
    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
J
Jason Wang 已提交
775
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
776 777
        rxfilter_notify(nc);

778 779 780
        return VIRTIO_NET_OK;
    }

781
    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
782
        return VIRTIO_NET_ERR;
783
    }
784

785 786 787 788 789
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
790

791 792
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
793
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
794
    if (s != sizeof(mac_data.entries)) {
795
        goto error;
796 797
    }
    iov_discard_front(&iov, &iov_cnt, s);
798

799
    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
800
        goto error;
801
    }
802 803

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
804
        s = iov_to_buf(iov, iov_cnt, 0, macs,
805 806
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
807
            goto error;
808
        }
809
        in_use += mac_data.entries;
810
    } else {
811
        uni_overflow = 1;
812 813
    }

814 815
    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

816
    first_multi = in_use;
817

818 819
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
820
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
821
    if (s != sizeof(mac_data.entries)) {
822
        goto error;
823 824 825
    }

    iov_discard_front(&iov, &iov_cnt, s);
826

827
    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
828
        goto error;
829
    }
830

831
    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
832
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
833 834
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
835
            goto error;
836
        }
837
        in_use += mac_data.entries;
838
    } else {
839
        multi_overflow = 1;
840 841
    }

842 843 844 845 846 847
    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
848 849
    rxfilter_notify(nc);

850
    return VIRTIO_NET_OK;
851 852

error:
853
    g_free(macs);
854
    return VIRTIO_NET_ERR;
855 856
}

857
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
858
                                        struct iovec *iov, unsigned int iov_cnt)
859
{
860
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
861
    uint16_t vid;
862
    size_t s;
863
    NetClientState *nc = qemu_get_queue(n->nic);
864

865
    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
866
    vid = virtio_lduw_p(vdev, &vid);
867
    if (s != sizeof(vid)) {
868 869 870 871 872 873 874 875 876 877 878 879 880
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

881 882
    rxfilter_notify(nc);

883 884 885
    return VIRTIO_NET_OK;
}

J
Jason Wang 已提交
886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

J
Jason Wang 已提交
903
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
904
                                struct iovec *iov, unsigned int iov_cnt)
J
Jason Wang 已提交
905
{
906
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
907 908 909
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;
J
Jason Wang 已提交
910

911 912
    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
J
Jason Wang 已提交
913 914 915 916 917 918 919
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

920
    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
J
Jason Wang 已提交
921

922 923 924
    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
J
Jason Wang 已提交
925 926 927 928
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

929
    n->curr_queues = queues;
J
Jason Wang 已提交
930 931
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
932
    virtio_net_set_status(vdev, vdev->status);
J
Jason Wang 已提交
933 934 935 936
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
937

938 939
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
940
    VirtIONet *n = VIRTIO_NET(vdev);
941 942
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
943
    VirtQueueElement *elem;
944
    size_t s;
J
Jason Wang 已提交
945
    struct iovec *iov, *iov2;
946
    unsigned int iov_cnt;
947

948 949 950 951 952 953 954
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
955 956 957 958
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
959 960
        }

961 962
        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
963 964 965 966
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
A
Amos Kong 已提交
967
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
968 969 970 971 972
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
J
Jason Wang 已提交
973 974
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
J
Jason Wang 已提交
975
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
976
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
977 978
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
979 980
        }

981
        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
982
        assert(s == sizeof(status));
983

984
        virtqueue_push(vq, elem, sizeof(status));
985
        virtio_notify(vdev, vq);
J
Jason Wang 已提交
986
        g_free(iov2);
987
        g_free(elem);
988 989 990
    }
}

A
aliguori 已提交
991 992 993 994
/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
995
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
996
    int queue_index = vq2q(virtio_get_queue_index(vq));
997

J
Jason Wang 已提交
998
    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
A
aliguori 已提交
999 1000
}

1001
static int virtio_net_can_receive(NetClientState *nc)
A
aliguori 已提交
1002
{
J
Jason Wang 已提交
1003
    VirtIONet *n = qemu_get_nic_opaque(nc);
1004
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
J
Jason Wang 已提交
1005
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1006

1007
    if (!vdev->vm_running) {
1008 1009
        return 0;
    }
1010

J
Jason Wang 已提交
1011 1012 1013 1014
    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

1015
    if (!virtio_queue_ready(q->rx_vq) ||
1016
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
A
aliguori 已提交
1017
        return 0;
1018
    }
A
aliguori 已提交
1019

1020 1021 1022
    return 1;
}

1023
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1024
{
1025 1026
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
A
aliguori 已提交
1027
        (n->mergeable_rx_bufs &&
1028 1029
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);
1030 1031 1032 1033 1034

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
1035
        if (virtio_queue_empty(q->rx_vq) ||
1036
            (n->mergeable_rx_bufs &&
1037
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1038
            return 0;
1039
        }
A
aliguori 已提交
1040 1041
    }

1042
    virtio_queue_set_notification(q->rx_vq, 0);
A
aliguori 已提交
1043 1044 1045
    return 1;
}

1046
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1047
{
1048 1049 1050 1051
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
1052 1053
}

A
Anthony Liguori 已提交
1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
M
Michael S. Tsirkin 已提交
1069
                                        uint8_t *buf, size_t size)
A
Anthony Liguori 已提交
1070 1071 1072 1073 1074 1075
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
M
Michael S. Tsirkin 已提交
1076
        net_checksum_calculate(buf, size);
A
Anthony Liguori 已提交
1077 1078 1079 1080
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

1081 1082
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
A
aliguori 已提交
1083
{
M
Mark McLoughlin 已提交
1084
    if (n->has_vnet_hdr) {
M
Michael S. Tsirkin 已提交
1085 1086
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
1087 1088
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
1089 1090 1091 1092

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
1093
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
M
Michael S. Tsirkin 已提交
1094 1095 1096 1097 1098 1099
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
M
Mark McLoughlin 已提交
1100
    }
A
aliguori 已提交
1101 1102
}

1103 1104 1105
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1106
    static const uint8_t vlan[] = {0x81, 0x00};
1107
    uint8_t *ptr = (uint8_t *)buf;
1108
    int i;
1109 1110 1111 1112

    if (n->promisc)
        return 1;

1113
    ptr += n->host_hdr_len;
M
Mark McLoughlin 已提交
1114

1115
    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1116
        int vid = lduw_be_p(ptr + 14) & 0xfff;
1117 1118 1119 1120
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

1121 1122
    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
1123 1124 1125
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
1126
        } else if (n->allmulti || n->mac_table.multi_overflow) {
1127 1128
            return 1;
        }
1129 1130 1131 1132 1133 1134

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
1135
    } else { // unicast
1136 1137 1138
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
1139 1140
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1141 1142
            return 1;
        }
1143

1144 1145 1146 1147 1148
        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
1149 1150
    }

1151 1152 1153
    return 0;
}

1154 1155
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
A
aliguori 已提交
1156
{
J
Jason Wang 已提交
1157
    VirtIONet *n = qemu_get_nic_opaque(nc);
J
Jason Wang 已提交
1158
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1159
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1160 1161 1162
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
M
Michael S. Tsirkin 已提交
1163
    size_t offset, i, guest_offset;
A
aliguori 已提交
1164

J
Jason Wang 已提交
1165
    if (!virtio_net_can_receive(nc)) {
1166
        return -1;
J
Jason Wang 已提交
1167
    }
1168

1169
    /* hdr_len refers to the header we supply to the guest */
1170
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1171
        return 0;
1172
    }
A
aliguori 已提交
1173

1174
    if (!receive_filter(n, buf, size))
1175
        return size;
1176

A
aliguori 已提交
1177 1178 1179
    offset = i = 0;

    while (offset < size) {
1180
        VirtQueueElement *elem;
A
aliguori 已提交
1181
        int len, total;
1182
        const struct iovec *sg;
A
aliguori 已提交
1183

A
Amit Shah 已提交
1184
        total = 0;
A
aliguori 已提交
1185

1186 1187
        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
1188 1189 1190 1191 1192 1193 1194 1195 1196 1197
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
A
aliguori 已提交
1198 1199
        }

1200
        if (elem->in_num < 1) {
1201 1202 1203 1204 1205
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
A
aliguori 已提交
1206 1207
        }

1208
        sg = elem->in_sg;
A
aliguori 已提交
1209
        if (i == 0) {
1210
            assert(offset == 0);
1211 1212
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1213
                                    sg, elem->in_num,
1214 1215 1216
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }
A
aliguori 已提交
1217

1218
            receive_header(n, sg, elem->in_num, buf, size);
1219
            offset = n->host_hdr_len;
1220
            total += n->guest_hdr_len;
M
Michael S. Tsirkin 已提交
1221 1222 1223
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
A
aliguori 已提交
1224 1225 1226
        }

        /* copy in packet.  ugh */
1227
        len = iov_from_buf(sg, elem->in_num, guest_offset,
1228
                           buf + offset, size - offset);
A
aliguori 已提交
1229
        total += len;
1230 1231 1232 1233 1234
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
1235
            virtqueue_unpop(q->rx_vq, elem, total);
1236
            g_free(elem);
1237 1238
            return size;
        }
A
aliguori 已提交
1239 1240

        /* signal other side */
1241 1242
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
A
aliguori 已提交
1243 1244
    }

1245
    if (mhdr_cnt) {
1246
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
1247 1248 1249
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
1250
    }
A
aliguori 已提交
1251

1252
    virtqueue_flush(q->rx_vq, i);
1253
    virtio_notify(vdev, q->rx_vq);
1254 1255

    return size;
A
aliguori 已提交
1256 1257
}

1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

1269
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1270

1271
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1272
{
J
Jason Wang 已提交
1273
    VirtIONet *n = qemu_get_nic_opaque(nc);
J
Jason Wang 已提交
1274
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1275
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1276

1277
    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1278
    virtio_notify(vdev, q->tx_vq);
1279

1280 1281
    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;
1282

1283 1284
    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
1285 1286
}

A
aliguori 已提交
1287
/* TX */
1288
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
A
aliguori 已提交
1289
{
1290
    VirtIONet *n = q->n;
1291
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1292
    VirtQueueElement *elem;
1293
    int32_t num_packets = 0;
J
Jason Wang 已提交
1294
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1295
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1296 1297
        return num_packets;
    }
A
aliguori 已提交
1298

1299
    if (q->async_tx.elem) {
1300
        virtio_queue_set_notification(q->tx_vq, 0);
1301
        return num_packets;
1302 1303
    }

1304
    for (;;) {
J
Jason Wang 已提交
1305
        ssize_t ret;
1306 1307
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
J
Jason Wang 已提交
1308
        struct virtio_net_hdr_mrg_rxbuf mhdr;
A
aliguori 已提交
1309

1310 1311 1312 1313 1314 1315 1316
        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
M
Michael S. Tsirkin 已提交
1317
        if (out_num < 1) {
1318 1319 1320 1321
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
A
aliguori 已提交
1322 1323
        }

1324
        if (n->has_vnet_hdr) {
J
Jason Wang 已提交
1325 1326
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
1327 1328 1329 1330
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
1331
            }
1332
            if (n->needs_vnet_hdr_swap) {
J
Jason Wang 已提交
1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
		}
                out_num += 1;
                out_sg = sg2;
	    }
1345
        }
1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                             out_sg, out_num,
                             n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
A
aliguori 已提交
1361 1362
        }

J
Jason Wang 已提交
1363 1364
        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
1365
        if (ret == 0) {
1366 1367
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
1368
            return -EBUSY;
1369 1370
        }

J
Jason Wang 已提交
1371
drop:
1372
        virtqueue_push(q->tx_vq, elem, 0);
1373
        virtio_notify(vdev, q->tx_vq);
1374
        g_free(elem);
1375 1376 1377 1378

        if (++num_packets >= n->tx_burst) {
            break;
        }
A
aliguori 已提交
1379
    }
1380
    return num_packets;
A
aliguori 已提交
1381 1382
}

1383
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
A
aliguori 已提交
1384
{
1385
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
1386
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
A
aliguori 已提交
1387

1388 1389 1390 1391 1392
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

1393
    /* This happens when device was stopped but VCPU wasn't. */
1394
    if (!vdev->vm_running) {
1395
        q->tx_waiting = 1;
1396 1397 1398
        return;
    }

1399
    if (q->tx_waiting) {
A
aliguori 已提交
1400
        virtio_queue_set_notification(vq, 1);
1401
        timer_del(q->tx_timer);
1402
        q->tx_waiting = 0;
1403 1404 1405
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
A
aliguori 已提交
1406
    } else {
1407 1408
        timer_mod(q->tx_timer,
                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1409
        q->tx_waiting = 1;
A
aliguori 已提交
1410 1411 1412 1413
        virtio_queue_set_notification(vq, 0);
    }
}

1414 1415
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
1416
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
1417
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1418

1419 1420 1421 1422 1423
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

1424
    if (unlikely(q->tx_waiting)) {
1425 1426
        return;
    }
1427
    q->tx_waiting = 1;
1428
    /* This happens when device was stopped but VCPU wasn't. */
1429
    if (!vdev->vm_running) {
1430 1431
        return;
    }
1432
    virtio_queue_set_notification(vq, 0);
1433
    qemu_bh_schedule(q->tx_bh);
1434 1435
}

A
aliguori 已提交
1436 1437
static void virtio_net_tx_timer(void *opaque)
{
1438 1439
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
1440
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1441 1442 1443 1444 1445 1446
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }
A
aliguori 已提交
1447

1448
    q->tx_waiting = 0;
A
aliguori 已提交
1449 1450

    /* Just in case the driver is not ready on more */
1451
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
A
aliguori 已提交
1452
        return;
1453
    }
A
aliguori 已提交
1454

1455 1456
    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
A
aliguori 已提交
1457 1458
}

1459 1460
static void virtio_net_tx_bh(void *opaque)
{
1461 1462
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
1463
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1464 1465
    int32_t ret;

1466 1467 1468 1469 1470 1471
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }
1472

1473
    q->tx_waiting = 0;
1474 1475

    /* Just in case the driver is not ready on more */
1476
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1477
        return;
1478
    }
1479

1480
    ret = virtio_net_flush_tx(q);
1481 1482 1483
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
1484 1485 1486 1487 1488
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
1489 1490
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
1491 1492 1493 1494 1495 1496
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
1497
    virtio_queue_set_notification(q->tx_vq, 1);
1498 1499 1500 1501
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
1502 1503 1504
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
1505 1506 1507
    }
}

1508 1509 1510 1511
static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

1512 1513
    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);
1514

1515 1516
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
1517 1518
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_timer);
1519 1520 1521 1522 1523
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
1524 1525
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_bh);
1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
1545
        q->tx_timer = NULL;
1546 1547
    } else {
        qemu_bh_delete(q->tx_bh);
1548
        q->tx_bh = NULL;
1549
    }
1550
    q->tx_waiting = 0;
1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following too loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */

static void virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

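    /* Queue 0's tx_waiting is migrated with the main device section; this
     * temporary only describes queues 1..curr_queues-1. */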
    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
            tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queues_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
    },
};

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
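    /* Only valid while vhost is running; forward the query to the vhost
     * backend attached to this queue pair's peer. */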
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
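    /* The config space always exposes the mac field, so force the MAC feature
     * bit before sizing the config from the offered features. */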
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL; in that case the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

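    /* A non-zero host_mtu property makes us offer VIRTIO_NET_F_MTU; the value
     * itself is reported through the config space. */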
    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    /*
     * Currently, backends other than vhost-user don't support 1024 queue
     * size.
     */
    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
        n->nic_conf.peers.ncs[0]->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static void virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
}

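/* Outer migration section: only generic virtio state lives here; the
 * device-specific fields are handled by vmstate_virtio_net_device, which is
 * wired up below as vdc->vmsd. */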
static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)