virtio-net.c 66.8 KB
Newer Older
A
aliguori 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

P
Peter Maydell 已提交
14
#include "qemu/osdep.h"
15
#include "qemu/iov.h"
P
Paolo Bonzini 已提交
16
#include "hw/virtio/virtio.h"
P
Paolo Bonzini 已提交
17
#include "net/net.h"
18
#include "net/checksum.h"
19
#include "net/tap.h"
20 21
#include "qemu/error-report.h"
#include "qemu/timer.h"
P
Paolo Bonzini 已提交
22 23
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
24
#include "hw/virtio/virtio-bus.h"
25
#include "qapi/qmp/qjson.h"
26
#include "qapi-event.h"
27
#include "hw/virtio/virtio-access.h"
28
#include "migration/misc.h"
A
aliguori 已提交
29

30
#define VIRTIO_NET_VM_VERSION    11
31

32
#define MAC_TABLE_ENTRIES    64
33
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
34

35 36
/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
37 38
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

39 40
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
41
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
42

43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
62 63
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
64 65 66
    {}
};

J
Jason Wang 已提交
67
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
68 69 70
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

J
Jason Wang 已提交
71
    return &n->vqs[nc->queue_index];
72
}
J
Jason Wang 已提交
73 74 75 76 77 78

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

A
aliguori 已提交
79 80 81 82
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

83
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
A
aliguori 已提交
84
{
85
    VirtIONet *n = VIRTIO_NET(vdev);
A
aliguori 已提交
86 87
    struct virtio_net_config netcfg;

88 89
    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
90
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
91
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
92
    memcpy(config, &netcfg, n->config_size);
A
aliguori 已提交
93 94
}

95 96
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
97
    VirtIONet *n = VIRTIO_NET(vdev);
98
    struct virtio_net_config netcfg = {};
99

100
    memcpy(&netcfg, config, n->config_size);
101

102 103
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
104
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
105
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
J
Jason Wang 已提交
106
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
107 108 109
    }
}

110 111
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
112
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
113
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
114
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
115 116
}

J
Jason Wang 已提交
117 118 119 120 121 122 123 124 125 126
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

127
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
128
{
129
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
J
Jason Wang 已提交
130
    NetClientState *nc = qemu_get_queue(n->nic);
J
Jason Wang 已提交
131
    int queues = n->multiqueue ? n->max_queues : 1;
J
Jason Wang 已提交
132

133
    if (!get_vhost_net(nc->peer)) {
134 135
        return;
    }
J
Jason Wang 已提交
136

137 138
    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
139 140 141
        return;
    }
    if (!n->vhost_started) {
142 143
        int r, i;

144 145 146 147 148 149 150
        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

151 152 153 154 155 156 157 158 159 160 161
        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0;  i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

162 163 164 165 166 167 168 169 170 171
        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

172
        n->vhost_started = 1;
173
        r = vhost_net_start(vdev, n->nic->ncs, queues);
174
        if (r < 0) {
175 176
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
177
            n->vhost_started = 0;
178 179
        }
    } else {
180
        vhost_net_stop(vdev, n->nic->ncs, queues);
181 182 183 184
        n->vhost_started = 0;
    }
}

185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

238 239 240 241 242 243 244 245
static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

246 247
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
248
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
249 250 251
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;
252

253
    virtio_net_vnet_endian_status(n, status);
254 255
    virtio_net_vhost_status(n, status);

J
Jason Wang 已提交
256
    for (i = 0; i < n->max_queues; i++) {
257 258
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
J
Jason Wang 已提交
259
        q = &n->vqs[i];
260

J
Jason Wang 已提交
261 262
        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
263
        } else {
J
Jason Wang 已提交
264
            queue_status = status;
265
        }
266 267 268 269 270 271
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }
J
Jason Wang 已提交
272 273 274 275 276

        if (!q->tx_waiting) {
            continue;
        }

277
        if (queue_started) {
J
Jason Wang 已提交
278
            if (q->tx_timer) {
279 280
                timer_mod(q->tx_timer,
                               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
J
Jason Wang 已提交
281 282 283
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
284
        } else {
J
Jason Wang 已提交
285
            if (q->tx_timer) {
286
                timer_del(q->tx_timer);
J
Jason Wang 已提交
287 288 289
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
290 291 292 293 294 295 296 297
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* if tx is waiting we are likely have some packets in tx queue
                 * and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
298 299 300 301
        }
    }
}

302
static void virtio_net_set_link_status(NetClientState *nc)
303
{
J
Jason Wang 已提交
304
    VirtIONet *n = qemu_get_nic_opaque(nc);
305
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
306 307
    uint16_t old_status = n->status;

M
Mark McLoughlin 已提交
308
    if (nc->link_down)
309 310 311 312 313
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
314
        virtio_notify_config(vdev);
315

316
    virtio_net_set_status(vdev, vdev->status);
317 318
}

319 320 321 322 323
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
324
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
325 326
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path, &error_abort);
327
        g_free(path);
328 329 330 331 332 333

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

354 355 356
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
357
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
358 359
    RxFilterInfo *info;
    strList *str_list, *entry;
360
    int i;
361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

386
    info->main_mac = qemu_mac_strdup_printf(n->mac);
387 388 389 390

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
391
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
392 393 394 395 396 397 398 399
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
400
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
401 402 403 404
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
405
    info->vlan_table = get_vlan_table(n);
406

407
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
408 409 410 411 412
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
413 414 415 416 417 418 419 420
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

421 422
static void virtio_net_reset(VirtIODevice *vdev)
{
423
    VirtIONet *n = VIRTIO_NET(vdev);
424 425 426 427

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
428 429 430 431
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
J
Jason Wang 已提交
432 433
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
J
Jason Wang 已提交
434 435 436
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;
437

438
    /* Flush any MAC and VLAN filter table state */
439
    n->mac_table.in_use = 0;
440
    n->mac_table.first_multi = 0;
441 442
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
443
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
444
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
445
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
446
    memset(n->vlans, 0, MAX_VLAN >> 3);
447 448
}

449
static void peer_test_vnet_hdr(VirtIONet *n)
M
Mark McLoughlin 已提交
450
{
J
Jason Wang 已提交
451 452
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
453
        return;
J
Jason Wang 已提交
454
    }
M
Mark McLoughlin 已提交
455

456
    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
457
}
M
Mark McLoughlin 已提交
458

459 460
static int peer_has_vnet_hdr(VirtIONet *n)
{
M
Mark McLoughlin 已提交
461 462 463
    return n->has_vnet_hdr;
}

464 465 466 467 468
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

469
    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
470 471 472 473

    return n->has_ufo;
}

474 475
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
476
{
J
Jason Wang 已提交
477 478 479
    int i;
    NetClientState *nc;

480 481
    n->mergeable_rx_bufs = mergeable_rx_bufs;

482 483 484 485 486 487 488
    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }
489

J
Jason Wang 已提交
490 491 492 493
    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
494 495
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
J
Jason Wang 已提交
496 497
            n->host_hdr_len = n->guest_hdr_len;
        }
498 499 500
    }
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518
static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user don't support max queue size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    return VIRTQUEUE_MAX_SIZE;
}

J
Jason Wang 已提交
519 520 521 522 523 524 525 526
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

527
    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
528 529 530
        vhost_set_vring_enable(nc->peer, 1);
    }

531
    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
J
Jason Wang 已提交
532 533 534
        return 0;
    }

535 536 537 538
    if (n->max_queues == 1) {
        return 0;
    }

J
Jason Wang 已提交
539 540 541 542 543 544 545 546 547 548 549
    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

550
    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
551 552 553
        vhost_set_vring_enable(nc->peer, 0);
    }

554
    if (nc->peer->info->type !=  NET_CLIENT_DRIVER_TAP) {
J
Jason Wang 已提交
555 556 557 558 559 560 561 562 563
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
564
    int r;
J
Jason Wang 已提交
565

566 567 568 569
    if (n->nic->peer_deleted) {
        return;
    }

J
Jason Wang 已提交
570 571
    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
572 573
            r = peer_attach(n, i);
            assert(!r);
J
Jason Wang 已提交
574
        } else {
575 576
            r = peer_detach(n, i);
            assert(!r);
J
Jason Wang 已提交
577 578 579 580
        }
    }
}

J
Jason Wang 已提交
581
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
J
Jason Wang 已提交
582

J
Jason Wang 已提交
583 584
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
A
aliguori 已提交
585
{
586
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
587
    NetClientState *nc = qemu_get_queue(n->nic);
A
aliguori 已提交
588

589 590 591
    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

592
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
593

594
    if (!peer_has_vnet_hdr(n)) {
595 596 597 598
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
599

600 601 602 603
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
604
    }
M
Mark McLoughlin 已提交
605

606
    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
607 608
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
M
Mark McLoughlin 已提交
609 610
    }

611
    if (!get_vhost_net(nc->peer)) {
612 613
        return features;
    }
614 615 616 617 618 619 620 621 622
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
A
aliguori 已提交
623 624
}

G
Gerd Hoffmann 已提交
625
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
626
{
G
Gerd Hoffmann 已提交
627
    uint64_t features = 0;
628 629 630

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
631 632 633 634 635
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
636

637
    return features;
638 639
}

640 641
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
642
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

G
Gerd Hoffmann 已提交
668
static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
A
aliguori 已提交
669
{
670
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
671 672
    int i;

673 674 675 676 677
    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

678
    virtio_net_set_multiqueue(n,
679
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));
A
aliguori 已提交
680

681
    virtio_net_set_mrg_rx_bufs(n,
682 683 684 685
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));
686 687

    if (n->has_vnet_hdr) {
688 689 690
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
691
    }
J
Jason Wang 已提交
692 693 694 695

    for (i = 0;  i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

696
        if (!get_vhost_net(nc->peer)) {
J
Jason Wang 已提交
697 698
            continue;
        }
699
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
D
David L Stevens 已提交
700
    }
701

702
    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
703 704 705 706
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
A
aliguori 已提交
707 708
}

709
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
710
                                     struct iovec *iov, unsigned int iov_cnt)
711 712
{
    uint8_t on;
713
    size_t s;
714
    NetClientState *nc = qemu_get_queue(n->nic);
715

716 717 718
    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
719 720
    }

A
Amos Kong 已提交
721
    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
722
        n->promisc = on;
A
Amos Kong 已提交
723
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
724
        n->allmulti = on;
A
Amos Kong 已提交
725
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
726
        n->alluni = on;
A
Amos Kong 已提交
727
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
728
        n->nomulti = on;
A
Amos Kong 已提交
729
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
730
        n->nouni = on;
A
Amos Kong 已提交
731
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
732
        n->nobcast = on;
733
    } else {
734
        return VIRTIO_NET_ERR;
735
    }
736

737 738
    rxfilter_notify(nc);

739 740 741
    return VIRTIO_NET_OK;
}

742 743 744 745 746 747 748
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

749
    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
750 751 752 753 754 755 756 757 758 759 760
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

J
Jason Wang 已提交
761 762
        offloads = virtio_ldq_p(vdev, &offloads);

763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780
        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

781
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
782
                                 struct iovec *iov, unsigned int iov_cnt)
783
{
784
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
785
    struct virtio_net_ctrl_mac mac_data;
786
    size_t s;
787
    NetClientState *nc = qemu_get_queue(n->nic);
788

789 790 791 792 793 794
    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
J
Jason Wang 已提交
795
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
796 797
        rxfilter_notify(nc);

798 799 800
        return VIRTIO_NET_OK;
    }

801
    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
802
        return VIRTIO_NET_ERR;
803
    }
804

805 806 807 808 809
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
810

811 812
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
813
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
814
    if (s != sizeof(mac_data.entries)) {
815
        goto error;
816 817
    }
    iov_discard_front(&iov, &iov_cnt, s);
818

819
    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
820
        goto error;
821
    }
822 823

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
824
        s = iov_to_buf(iov, iov_cnt, 0, macs,
825 826
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
827
            goto error;
828
        }
829
        in_use += mac_data.entries;
830
    } else {
831
        uni_overflow = 1;
832 833
    }

834 835
    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

836
    first_multi = in_use;
837

838 839
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
840
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
841
    if (s != sizeof(mac_data.entries)) {
842
        goto error;
843 844 845
    }

    iov_discard_front(&iov, &iov_cnt, s);
846

847
    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
848
        goto error;
849
    }
850

851
    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
852
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
853 854
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
855
            goto error;
856
        }
857
        in_use += mac_data.entries;
858
    } else {
859
        multi_overflow = 1;
860 861
    }

862 863 864 865 866 867
    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
868 869
    rxfilter_notify(nc);

870
    return VIRTIO_NET_OK;
871 872

error:
873
    g_free(macs);
874
    return VIRTIO_NET_ERR;
875 876
}

877
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
878
                                        struct iovec *iov, unsigned int iov_cnt)
879
{
880
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
881
    uint16_t vid;
882
    size_t s;
883
    NetClientState *nc = qemu_get_queue(n->nic);
884

885
    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
886
    vid = virtio_lduw_p(vdev, &vid);
887
    if (s != sizeof(vid)) {
888 889 890 891 892 893 894 895 896 897 898 899 900
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

901 902
    rxfilter_notify(nc);

903 904 905
    return VIRTIO_NET_OK;
}

J
Jason Wang 已提交
906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

J
Jason Wang 已提交
923
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
924
                                struct iovec *iov, unsigned int iov_cnt)
J
Jason Wang 已提交
925
{
926
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
927 928 929
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;
J
Jason Wang 已提交
930

931 932
    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
J
Jason Wang 已提交
933 934 935 936 937 938 939
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

940
    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
J
Jason Wang 已提交
941

942 943 944
    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
J
Jason Wang 已提交
945 946 947 948
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

949
    n->curr_queues = queues;
J
Jason Wang 已提交
950 951
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
952
    virtio_net_set_status(vdev, vdev->status);
J
Jason Wang 已提交
953 954 955 956
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
957

958 959
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
960
    VirtIONet *n = VIRTIO_NET(vdev);
961 962
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
963
    VirtQueueElement *elem;
964
    size_t s;
J
Jason Wang 已提交
965
    struct iovec *iov, *iov2;
966
    unsigned int iov_cnt;
967

968 969 970 971 972 973 974
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
975 976 977 978
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
979 980
        }

981 982
        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
983 984 985 986
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
A
Amos Kong 已提交
987
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
988 989 990 991 992
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
J
Jason Wang 已提交
993 994
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
J
Jason Wang 已提交
995
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
996
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
997 998
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
999 1000
        }

1001
        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1002
        assert(s == sizeof(status));
1003

1004
        virtqueue_push(vq, elem, sizeof(status));
1005
        virtio_notify(vdev, vq);
J
Jason Wang 已提交
1006
        g_free(iov2);
1007
        g_free(elem);
1008 1009 1010
    }
}

A
aliguori 已提交
1011 1012 1013 1014
/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
1015
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
1016
    int queue_index = vq2q(virtio_get_queue_index(vq));
1017

J
Jason Wang 已提交
1018
    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
A
aliguori 已提交
1019 1020
}

1021
static int virtio_net_can_receive(NetClientState *nc)
A
aliguori 已提交
1022
{
J
Jason Wang 已提交
1023
    VirtIONet *n = qemu_get_nic_opaque(nc);
1024
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
J
Jason Wang 已提交
1025
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1026

1027
    if (!vdev->vm_running) {
1028 1029
        return 0;
    }
1030

J
Jason Wang 已提交
1031 1032 1033 1034
    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

1035
    if (!virtio_queue_ready(q->rx_vq) ||
1036
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
A
aliguori 已提交
1037
        return 0;
1038
    }
A
aliguori 已提交
1039

1040 1041 1042
    return 1;
}

1043
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1044
{
1045 1046
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
A
aliguori 已提交
1047
        (n->mergeable_rx_bufs &&
1048 1049
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);
1050 1051 1052 1053 1054

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
1055
        if (virtio_queue_empty(q->rx_vq) ||
1056
            (n->mergeable_rx_bufs &&
1057
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1058
            return 0;
1059
        }
A
aliguori 已提交
1060 1061
    }

1062
    virtio_queue_set_notification(q->rx_vq, 0);
A
aliguori 已提交
1063 1064 1065
    return 1;
}

1066
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1067
{
1068 1069 1070 1071
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
1072 1073
}

A
Anthony Liguori 已提交
1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
M
Michael S. Tsirkin 已提交
1089
                                        uint8_t *buf, size_t size)
A
Anthony Liguori 已提交
1090 1091 1092 1093 1094 1095
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
M
Michael S. Tsirkin 已提交
1096
        net_checksum_calculate(buf, size);
A
Anthony Liguori 已提交
1097 1098 1099 1100
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

1101 1102
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
A
aliguori 已提交
1103
{
M
Mark McLoughlin 已提交
1104
    if (n->has_vnet_hdr) {
M
Michael S. Tsirkin 已提交
1105 1106
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
1107 1108
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
1109 1110 1111 1112

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
1113
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
M
Michael S. Tsirkin 已提交
1114 1115 1116 1117 1118 1119
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
M
Mark McLoughlin 已提交
1120
    }
A
aliguori 已提交
1121 1122
}

1123 1124 1125
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1126
    static const uint8_t vlan[] = {0x81, 0x00};
1127
    uint8_t *ptr = (uint8_t *)buf;
1128
    int i;
1129 1130 1131 1132

    if (n->promisc)
        return 1;

1133
    ptr += n->host_hdr_len;
M
Mark McLoughlin 已提交
1134

1135
    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1136
        int vid = lduw_be_p(ptr + 14) & 0xfff;
1137 1138 1139 1140
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

1141 1142
    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
1143 1144 1145
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
1146
        } else if (n->allmulti || n->mac_table.multi_overflow) {
1147 1148
            return 1;
        }
1149 1150 1151 1152 1153 1154

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
1155
    } else { // unicast
1156 1157 1158
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
1159 1160
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1161 1162
            return 1;
        }
1163

1164 1165 1166 1167 1168
        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
1169 1170
    }

1171 1172 1173
    return 0;
}

1174 1175
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
A
aliguori 已提交
1176
{
J
Jason Wang 已提交
1177
    VirtIONet *n = qemu_get_nic_opaque(nc);
J
Jason Wang 已提交
1178
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1179
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1180 1181 1182
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
M
Michael S. Tsirkin 已提交
1183
    size_t offset, i, guest_offset;
A
aliguori 已提交
1184

J
Jason Wang 已提交
1185
    if (!virtio_net_can_receive(nc)) {
1186
        return -1;
J
Jason Wang 已提交
1187
    }
1188

1189
    /* hdr_len refers to the header we supply to the guest */
1190
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1191
        return 0;
1192
    }
A
aliguori 已提交
1193

1194
    if (!receive_filter(n, buf, size))
1195
        return size;
1196

A
aliguori 已提交
1197 1198 1199
    offset = i = 0;

    while (offset < size) {
1200
        VirtQueueElement *elem;
A
aliguori 已提交
1201
        int len, total;
1202
        const struct iovec *sg;
A
aliguori 已提交
1203

A
Amit Shah 已提交
1204
        total = 0;
A
aliguori 已提交
1205

1206 1207
        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
A
aliguori 已提交
1218 1219
        }

1220
        if (elem->in_num < 1) {
1221 1222 1223 1224 1225
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
A
aliguori 已提交
1226 1227
        }

1228
        sg = elem->in_sg;
A
aliguori 已提交
1229
        if (i == 0) {
1230
            assert(offset == 0);
1231 1232
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1233
                                    sg, elem->in_num,
1234 1235 1236
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }
A
aliguori 已提交
1237

1238
            receive_header(n, sg, elem->in_num, buf, size);
1239
            offset = n->host_hdr_len;
1240
            total += n->guest_hdr_len;
M
Michael S. Tsirkin 已提交
1241 1242 1243
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
A
aliguori 已提交
1244 1245 1246
        }

        /* copy in packet.  ugh */
1247
        len = iov_from_buf(sg, elem->in_num, guest_offset,
1248
                           buf + offset, size - offset);
A
aliguori 已提交
1249
        total += len;
1250 1251 1252 1253 1254
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
1255
            virtqueue_unpop(q->rx_vq, elem, total);
1256
            g_free(elem);
1257 1258
            return size;
        }
A
aliguori 已提交
1259 1260

        /* signal other side */
1261 1262
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
A
aliguori 已提交
1263 1264
    }

1265
    if (mhdr_cnt) {
1266
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
1267 1268 1269
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
1270
    }
A
aliguori 已提交
1271

1272
    virtqueue_flush(q->rx_vq, i);
1273
    virtio_notify(vdev, q->rx_vq);
1274 1275

    return size;
A
aliguori 已提交
1276 1277
}

1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

1289
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1290

1291
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1292
{
J
Jason Wang 已提交
1293
    VirtIONet *n = qemu_get_nic_opaque(nc);
J
Jason Wang 已提交
1294
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1295
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1296

1297
    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1298
    virtio_notify(vdev, q->tx_vq);
1299

1300 1301
    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;
1302

1303 1304
    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
1305 1306
}

A
aliguori 已提交
1307
/* TX */
1308
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
A
aliguori 已提交
1309
{
1310
    VirtIONet *n = q->n;
1311
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1312
    VirtQueueElement *elem;
1313
    int32_t num_packets = 0;
J
Jason Wang 已提交
1314
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1315
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1316 1317
        return num_packets;
    }
A
aliguori 已提交
1318

1319
    if (q->async_tx.elem) {
1320
        virtio_queue_set_notification(q->tx_vq, 0);
1321
        return num_packets;
1322 1323
    }

1324
    for (;;) {
J
Jason Wang 已提交
1325
        ssize_t ret;
1326 1327
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
J
Jason Wang 已提交
1328
        struct virtio_net_hdr_mrg_rxbuf mhdr;
A
aliguori 已提交
1329

1330 1331 1332 1333 1334 1335 1336
        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
M
Michael S. Tsirkin 已提交
1337
        if (out_num < 1) {
1338 1339 1340 1341
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
A
aliguori 已提交
1342 1343
        }

1344
        if (n->has_vnet_hdr) {
J
Jason Wang 已提交
1345 1346
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
1347 1348 1349 1350
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
1351
            }
1352
            if (n->needs_vnet_hdr_swap) {
J
Jason Wang 已提交
1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
		}
                out_num += 1;
                out_sg = sg2;
	    }
1365
        }
1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                             out_sg, out_num,
                             n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
A
aliguori 已提交
1381 1382
        }

J
Jason Wang 已提交
1383 1384
        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
1385
        if (ret == 0) {
1386 1387
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
1388
            return -EBUSY;
1389 1390
        }

J
Jason Wang 已提交
1391
drop:
1392
        virtqueue_push(q->tx_vq, elem, 0);
1393
        virtio_notify(vdev, q->tx_vq);
1394
        g_free(elem);
1395 1396 1397 1398

        if (++num_packets >= n->tx_burst) {
            break;
        }
A
aliguori 已提交
1399
    }
1400
    return num_packets;
A
aliguori 已提交
1401 1402
}

1403
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
A
aliguori 已提交
1404
{
1405
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
1406
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
A
aliguori 已提交
1407

1408 1409 1410 1411 1412
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

1413
    /* This happens when device was stopped but VCPU wasn't. */
1414
    if (!vdev->vm_running) {
1415
        q->tx_waiting = 1;
1416 1417 1418
        return;
    }

1419
    if (q->tx_waiting) {
A
aliguori 已提交
1420
        virtio_queue_set_notification(vq, 1);
1421
        timer_del(q->tx_timer);
1422
        q->tx_waiting = 0;
1423 1424 1425
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
A
aliguori 已提交
1426
    } else {
1427 1428
        timer_mod(q->tx_timer,
                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1429
        q->tx_waiting = 1;
A
aliguori 已提交
1430 1431 1432 1433
        virtio_queue_set_notification(vq, 0);
    }
}

1434 1435
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
1436
    VirtIONet *n = VIRTIO_NET(vdev);
J
Jason Wang 已提交
1437
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1438

1439 1440 1441 1442 1443
    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

1444
    if (unlikely(q->tx_waiting)) {
1445 1446
        return;
    }
1447
    q->tx_waiting = 1;
1448
    /* This happens when device was stopped but VCPU wasn't. */
1449
    if (!vdev->vm_running) {
1450 1451
        return;
    }
1452
    virtio_queue_set_notification(vq, 0);
1453
    qemu_bh_schedule(q->tx_bh);
1454 1455
}

A
aliguori 已提交
1456 1457
static void virtio_net_tx_timer(void *opaque)
{
1458 1459
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
1460
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1461 1462 1463 1464 1465 1466
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }
A
aliguori 已提交
1467

1468
    q->tx_waiting = 0;
A
aliguori 已提交
1469 1470

    /* Just in case the driver is not ready on more */
1471
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
A
aliguori 已提交
1472
        return;
1473
    }
A
aliguori 已提交
1474

1475 1476
    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
A
aliguori 已提交
1477 1478
}

1479 1480
static void virtio_net_tx_bh(void *opaque)
{
1481 1482
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
1483
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
1484 1485
    int32_t ret;

1486 1487 1488 1489 1490 1491
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }
1492

1493
    q->tx_waiting = 0;
1494 1495

    /* Just in case the driver is not ready on more */
1496
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1497
        return;
1498
    }
1499

1500
    ret = virtio_net_flush_tx(q);
1501 1502 1503
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
1504 1505 1506 1507 1508
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
1509 1510
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
1511 1512 1513 1514 1515 1516
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
1517
    virtio_queue_set_notification(q->tx_vq, 1);
1518 1519 1520 1521
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
1522 1523 1524
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
1525 1526 1527
    }
}

1528 1529 1530 1531
static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

1532 1533
    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);
1534

1535 1536
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
1537 1538
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_timer);
1539 1540 1541 1542 1543
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
1544 1545
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_bh);
1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
1565
        q->tx_timer = NULL;
1566 1567
    } else {
        qemu_bh_delete(q->tx_bh);
1568
        q->tx_bh = NULL;
1569
    }
1570
    q->tx_waiting = 0;
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
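    /*
     * Example (informational): growing from 1 to 2 queue pairs means going
     * from 3 to 5 virtqueues.  The old ctrl vq (index 2) is deleted just
     * below, the second loop adds pair 1 as indices 2 and 3, and ctrl_vq is
     * re-added last as index 4.  Shrinking runs the first loop instead,
     * deleting the now-unused pairs before ctrl_vq is re-added.
     */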
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */
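/*
 * For example (informational): with curr_queues == 3, pre_save below sets
 * vqs_1 = &vqs[1] and curr_queues_1 = 2, so only the tx_waiting flags of
 * queues 1 and 2 travel in this sub-section; queue 0's flag is migrated
 * with the main device state in vmstate_virtio_net_device.
 */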

static int virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }

    return 0;
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
            tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queues_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
    },
};

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL; in that case the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.rx_queue_size)) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                    n->net_conf.tx_queue_size);

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static int virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);

    return 0;
}

static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};
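/*
 * Note (informational): this outer section is registered as dc->vmsd in
 * class_init below; VMSTATE_VIRTIO_DEVICE pulls in the common virtio state,
 * which in turn migrates the device-specific fields via
 * vmstate_virtio_net_device, registered as vdc->vmsd.
 */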

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};
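/*
 * Example usage (informational; assumes the usual virtio-net-pci frontend
 * wrapping this device model):
 *
 *   -netdev tap,id=net0,queues=4,vhost=on \
 *   -device virtio-net-pci,netdev=net0,mq=on,rx_queue_size=1024,host_mtu=9000
 *
 * rx_queue_size and tx_queue_size must be powers of 2 within the limits
 * checked in virtio_net_device_realize(); max_queues is taken from the
 * number of queues offered by the peer netdev.
 */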

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)