2 * Virtio Network Device
4 * Copyright IBM, Corp. 2007
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/atomic.h"
17 #include "qemu/main-loop.h"
18 #include "qemu/module.h"
19 #include "hw/virtio/virtio.h"
21 #include "net/checksum.h"
23 #include "qemu/error-report.h"
24 #include "qemu/timer.h"
25 #include "qemu/option.h"
26 #include "qemu/option_int.h"
27 #include "qemu/config-file.h"
28 #include "qapi/qmp/qdict.h"
29 #include "hw/virtio/virtio-net.h"
30 #include "net/vhost_net.h"
31 #include "net/announce.h"
32 #include "hw/virtio/virtio-bus.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-events-net.h"
35 #include "hw/qdev-properties.h"
36 #include "qapi/qapi-types-migration.h"
37 #include "qapi/qapi-events-migration.h"
38 #include "hw/virtio/virtio-access.h"
39 #include "migration/misc.h"
40 #include "standard-headers/linux/ethtool.h"
41 #include "sysemu/sysemu.h"
43 #include "monitor/qdev.h"
44 #include "hw/pci/pci.h"
45 #include "net_rx_pkt.h"
46 #include "hw/virtio/vhost.h"
48 #define VIRTIO_NET_VM_VERSION 11
50 #define MAC_TABLE_ENTRIES 64
51 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
53 /* previously fixed value */
54 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
55 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
57 /* for now, only allow larger queues; with virtio-1, guest can downsize */
58 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
59 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
61 #define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */
63 #define VIRTIO_NET_TCP_FLAG 0x3F
64 #define VIRTIO_NET_TCP_HDR_LENGTH 0xF000
66 /* IPv4 max payload, 16 bits in the header */
67 #define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
68 #define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
70 /* header length value in the IP header, without options */
71 #define VIRTIO_NET_IP4_HEADER_LENGTH 5
73 #define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */
74 #define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
76 /* Purge coalesced packets timer interval. This value affects the performance
77    a lot, and should be tuned carefully; '300000' (300us) is the recommended
78    value to pass the WHQL test, '50000' can gain 2x netperf throughput with tso/gso/gro 'off'. */
80 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
82 #define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
83 VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
84 VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
85 VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
86 VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
87 VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
88 VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
89 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
90 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
92 static VirtIOFeature feature_sizes[] = {
93 {.flags = 1ULL << VIRTIO_NET_F_MAC,
94 .end = endof(struct virtio_net_config, mac)},
95 {.flags = 1ULL << VIRTIO_NET_F_STATUS,
96 .end = endof(struct virtio_net_config, status)},
97 {.flags = 1ULL << VIRTIO_NET_F_MQ,
98 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
99 {.flags = 1ULL << VIRTIO_NET_F_MTU,
100 .end = endof(struct virtio_net_config, mtu)},
101 {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
102 .end = endof(struct virtio_net_config, duplex)},
103 {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
104 .end = endof(struct virtio_net_config, supported_hash_types)},
108 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
110 VirtIONet *n = qemu_get_nic_opaque(nc);
112 return &n->vqs[nc->queue_index];
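/*
 * Each queue pair uses two consecutive virtqueues (RX at 2 * pair,
 * TX at 2 * pair + 1), so a virtqueue index maps to its queue pair
 * by dividing by two.
 */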
115 static int vq2q(int queue_index)
117 return queue_index / 2;
121 * - we could suppress RX interrupt if we were so inclined.
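/*
 * Fill the config space in guest (virtio) endianness.  For a vhost-vdpa
 * peer the structure is then re-read from the backend, which owns the
 * authoritative device state, and copied over the locally built values.
 */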
124 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
126 VirtIONet *n = VIRTIO_NET(vdev);
127 struct virtio_net_config netcfg;
128 NetClientState *nc = qemu_get_queue(n->nic);
131 memset(&netcfg, 0, sizeof(struct virtio_net_config));
132 virtio_stw_p(vdev, &netcfg.status, n->status);
133 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
134 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
135 memcpy(netcfg.mac, n->mac, ETH_ALEN);
136 virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
137 netcfg.duplex = n->net_conf.duplex;
138 netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
139 virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
140 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
141 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
142 virtio_stl_p(vdev, &netcfg.supported_hash_types,
143 VIRTIO_NET_RSS_SUPPORTED_HASHES);
144 memcpy(config, &netcfg, n->config_size);
147 * Is this VDPA? No peer means not VDPA: there's no way to
148 * disconnect/reconnect a VDPA peer.
150 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
151 ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
154 memcpy(config, &netcfg, n->config_size);
159 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
161 VirtIONet *n = VIRTIO_NET(vdev);
162 struct virtio_net_config netcfg = {};
163 NetClientState *nc = qemu_get_queue(n->nic);
165 memcpy(&netcfg, config, n->config_size);
167 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
168 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
169 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
170 memcpy(n->mac, netcfg.mac, ETH_ALEN);
171 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
175 * Is this VDPA? No peer means not VDPA: there's no way to
176 * disconnect/reconnect a VDPA peer.
178 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
179 vhost_net_set_config(get_vhost_net(nc->peer),
180 (uint8_t *)&netcfg, 0, n->config_size,
181 VHOST_SET_CONFIG_TYPE_MASTER);
185 static bool virtio_net_started(VirtIONet *n, uint8_t status)
187 VirtIODevice *vdev = VIRTIO_DEVICE(n);
188 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
189 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
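/*
 * Self-announce after migration: set VIRTIO_NET_S_ANNOUNCE and raise a
 * config interrupt; a driver that negotiated VIRTIO_NET_F_GUEST_ANNOUNCE
 * sends gratuitous packets and acknowledges with
 * VIRTIO_NET_CTRL_ANNOUNCE_ACK on the control queue.
 */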
192 static void virtio_net_announce_notify(VirtIONet *net)
194 VirtIODevice *vdev = VIRTIO_DEVICE(net);
195 trace_virtio_net_announce_notify();
197 net->status |= VIRTIO_NET_S_ANNOUNCE;
198 virtio_notify_config(vdev);
201 static void virtio_net_announce_timer(void *opaque)
203 VirtIONet *n = opaque;
204 trace_virtio_net_announce_timer(n->announce_timer.round);
206 n->announce_timer.round--;
207 virtio_net_announce_notify(n);
210 static void virtio_net_announce(NetClientState *nc)
212 VirtIONet *n = qemu_get_nic_opaque(nc);
213 VirtIODevice *vdev = VIRTIO_DEVICE(n);
216 /* Make sure the virtio migration announcement timer isn't running.
217  * If it is, let it trigger the announcement so that we do not cause confusion. */
220 if (n->announce_timer.round) {
224 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
225 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
226 virtio_net_announce_notify(n);
230 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
232 VirtIODevice *vdev = VIRTIO_DEVICE(n);
233 NetClientState *nc = qemu_get_queue(n->nic);
234 int queues = n->multiqueue ? n->max_queues : 1;
236 if (!get_vhost_net(nc->peer)) {
240 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
241 !!n->vhost_started) {
244 if (!n->vhost_started) {
247 if (n->needs_vnet_hdr_swap) {
248 error_report("backend does not support %s vnet headers; "
249 "falling back on userspace virtio",
250 virtio_is_big_endian(vdev) ? "BE" : "LE");
254 /* Any packets outstanding? Purge them to avoid touching rings
255 * when vhost is running.
257 for (i = 0; i < queues; i++) {
258 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
260 /* Purge both directions: TX and RX. */
261 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
262 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
265 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
266 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
268 error_report("%uBytes MTU not supported by the backend",
275 n->vhost_started = 1;
276 r = vhost_net_start(vdev, n->nic->ncs, queues);
278 error_report("unable to start vhost net: %d: "
279 "falling back on userspace virtio", -r);
280 n->vhost_started = 0;
283 vhost_net_stop(vdev, n->nic->ncs, queues);
284 n->vhost_started = 0;
288 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
289 NetClientState *peer,
292 if (virtio_is_big_endian(vdev)) {
293 return qemu_set_vnet_be(peer, enable);
295 return qemu_set_vnet_le(peer, enable);
299 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
300 int queues, bool enable)
304 for (i = 0; i < queues; i++) {
305 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
308 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
318 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
320 VirtIODevice *vdev = VIRTIO_DEVICE(n);
321 int queues = n->multiqueue ? n->max_queues : 1;
323 if (virtio_net_started(n, status)) {
324 /* Before using the device, we tell the network backend about the
325 * endianness to use when parsing vnet headers. If the backend
326 * can't do it, we fall back to fixing the headers in the core virtio-net code. */
329 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
331 } else if (virtio_net_started(n, vdev->status)) {
332 /* After using the device, we need to reset the network backend to
333 * the default (guest native endianness), otherwise the guest may
334 * lose network connectivity if it is rebooted into a different endianness. */
337 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
341 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
343 unsigned int dropped = virtqueue_drop_all(vq);
345 virtio_notify(vdev, vq);
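/*
 * React to a device status change: switch vnet header endianness, start
 * or stop vhost, and for each queue pair flush pending RX and arm or
 * cancel the TX timer/bottom half depending on whether the queue is
 * active under the current multiqueue setting.
 */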
349 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
351 VirtIONet *n = VIRTIO_NET(vdev);
354 uint8_t queue_status;
356 virtio_net_vnet_endian_status(n, status);
357 virtio_net_vhost_status(n, status);
359 for (i = 0; i < n->max_queues; i++) {
360 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
364 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
367 queue_status = status;
370 virtio_net_started(n, queue_status) && !n->vhost_started;
373 qemu_flush_queued_packets(ncs);
376 if (!q->tx_waiting) {
382 timer_mod(q->tx_timer,
383 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
385 qemu_bh_schedule(q->tx_bh);
389 timer_del(q->tx_timer);
391 qemu_bh_cancel(q->tx_bh);
393 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
394 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
396 /* If tx is waiting, we likely have some packets in the tx queue
397  * and notification disabled */
399 virtio_queue_set_notification(q->tx_vq, 1);
400 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
406 static void virtio_net_set_link_status(NetClientState *nc)
408 VirtIONet *n = qemu_get_nic_opaque(nc);
409 VirtIODevice *vdev = VIRTIO_DEVICE(n);
410 uint16_t old_status = n->status;
413 n->status &= ~VIRTIO_NET_S_LINK_UP;
415 n->status |= VIRTIO_NET_S_LINK_UP;
417 if (n->status != old_status)
418 virtio_notify_config(vdev);
420 virtio_net_set_status(vdev, vdev->status);
423 static void rxfilter_notify(NetClientState *nc)
425 VirtIONet *n = qemu_get_nic_opaque(nc);
427 if (nc->rxfilter_notify_enabled) {
428 char *path = object_get_canonical_path(OBJECT(n->qdev));
429 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
430 n->netclient_name, path);
433 /* disable event notification to avoid event flooding */
434 nc->rxfilter_notify_enabled = 0;
438 static intList *get_vlan_table(VirtIONet *n)
440 intList *list, *entry;
444 for (i = 0; i < MAX_VLAN >> 5; i++) {
445 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
446 if (n->vlans[i] & (1U << j)) {
447 entry = g_malloc0(sizeof(*entry));
448 entry->value = (i << 5) + j;
458 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
460 VirtIONet *n = qemu_get_nic_opaque(nc);
461 VirtIODevice *vdev = VIRTIO_DEVICE(n);
463 strList *str_list, *entry;
466 info = g_malloc0(sizeof(*info));
467 info->name = g_strdup(nc->name);
468 info->promiscuous = n->promisc;
471 info->unicast = RX_STATE_NONE;
472 } else if (n->alluni) {
473 info->unicast = RX_STATE_ALL;
475 info->unicast = RX_STATE_NORMAL;
479 info->multicast = RX_STATE_NONE;
480 } else if (n->allmulti) {
481 info->multicast = RX_STATE_ALL;
483 info->multicast = RX_STATE_NORMAL;
486 info->broadcast_allowed = n->nobcast;
487 info->multicast_overflow = n->mac_table.multi_overflow;
488 info->unicast_overflow = n->mac_table.uni_overflow;
490 info->main_mac = qemu_mac_strdup_printf(n->mac);
493 for (i = 0; i < n->mac_table.first_multi; i++) {
494 entry = g_malloc0(sizeof(*entry));
495 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
496 entry->next = str_list;
499 info->unicast_table = str_list;
502 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
503 entry = g_malloc0(sizeof(*entry));
504 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
505 entry->next = str_list;
508 info->multicast_table = str_list;
509 info->vlan_table = get_vlan_table(n);
511 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
512 info->vlan = RX_STATE_ALL;
513 } else if (!info->vlan_table) {
514 info->vlan = RX_STATE_NONE;
516 info->vlan = RX_STATE_NORMAL;
519 /* enable event notification after query */
520 nc->rxfilter_notify_enabled = 1;
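/*
 * Device reset: drop back to the pre-driver defaults - promiscuous RX,
 * empty MAC and VLAN filter tables, announce timer stopped, and any
 * queued or in-flight packets on the peers purged.
 */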
525 static void virtio_net_reset(VirtIODevice *vdev)
527 VirtIONet *n = VIRTIO_NET(vdev);
530 /* Reset back to compatibility mode */
537 /* multiqueue is disabled by default */
539 timer_del(n->announce_timer.tm);
540 n->announce_timer.round = 0;
541 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
543 /* Flush any MAC and VLAN filter table state */
544 n->mac_table.in_use = 0;
545 n->mac_table.first_multi = 0;
546 n->mac_table.multi_overflow = 0;
547 n->mac_table.uni_overflow = 0;
548 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
549 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
550 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
551 memset(n->vlans, 0, MAX_VLAN >> 3);
553 /* Flush any async TX */
554 for (i = 0; i < n->max_queues; i++) {
555 NetClientState *nc = qemu_get_subqueue(n->nic, i);
558 qemu_flush_or_purge_queued_packets(nc->peer, true);
559 assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
564 static void peer_test_vnet_hdr(VirtIONet *n)
566 NetClientState *nc = qemu_get_queue(n->nic);
571 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
574 static int peer_has_vnet_hdr(VirtIONet *n)
576 return n->has_vnet_hdr;
579 static int peer_has_ufo(VirtIONet *n)
581 if (!peer_has_vnet_hdr(n))
584 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
589 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
590 int version_1, int hash_report)
595 n->mergeable_rx_bufs = mergeable_rx_bufs;
598 n->guest_hdr_len = hash_report ?
599 sizeof(struct virtio_net_hdr_v1_hash) :
600 sizeof(struct virtio_net_hdr_mrg_rxbuf);
601 n->rss_data.populate_hash = !!hash_report;
603 n->guest_hdr_len = n->mergeable_rx_bufs ?
604 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
605 sizeof(struct virtio_net_hdr);
608 for (i = 0; i < n->max_queues; i++) {
609 nc = qemu_get_subqueue(n->nic, i);
611 if (peer_has_vnet_hdr(n) &&
612 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
613 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
614 n->host_hdr_len = n->guest_hdr_len;
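/*
 * Only a vhost-user peer supports a configurable TX queue size, so other
 * backends are capped at VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE while
 * vhost-user may use up to VIRTQUEUE_MAX_SIZE.
 */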
619 static int virtio_net_max_tx_queue_size(VirtIONet *n)
621 NetClientState *peer = n->nic_conf.peers.ncs[0];
624 * Backends other than vhost-user don't support max queue size.
627 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
630 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
631 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
634 return VIRTQUEUE_MAX_SIZE;
637 static int peer_attach(VirtIONet *n, int index)
639 NetClientState *nc = qemu_get_subqueue(n->nic, index);
645 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
646 vhost_set_vring_enable(nc->peer, 1);
649 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
653 if (n->max_queues == 1) {
657 return tap_enable(nc->peer);
660 static int peer_detach(VirtIONet *n, int index)
662 NetClientState *nc = qemu_get_subqueue(n->nic, index);
668 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
669 vhost_set_vring_enable(nc->peer, 0);
672 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
676 return tap_disable(nc->peer);
679 static void virtio_net_set_queues(VirtIONet *n)
684 if (n->nic->peer_deleted) {
688 for (i = 0; i < n->max_queues; i++) {
689 if (i < n->curr_queues) {
690 r = peer_attach(n, i);
693 r = peer_detach(n, i);
699 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
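/*
 * Feature negotiation: start from the host feature set, strip anything
 * the peer cannot back (vnet header offloads, UFO, hash reporting
 * without vnet headers), then let the vhost backend mask the result
 * when one is attached.
 */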
701 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
704 VirtIONet *n = VIRTIO_NET(vdev);
705 NetClientState *nc = qemu_get_queue(n->nic);
707 /* First, sync all possible supported virtio-net features */
708 features |= n->host_features;
710 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
712 if (!peer_has_vnet_hdr(n)) {
713 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
714 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
715 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
716 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
718 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
719 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
720 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
721 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
723 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
726 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
727 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
728 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
731 if (!get_vhost_net(nc->peer)) {
735 virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
736 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
737 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
738 vdev->backend_features = features;
740 if (n->mtu_bypass_backend &&
741 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
742 features |= (1ULL << VIRTIO_NET_F_MTU);
748 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
750 uint64_t features = 0;
752 /* Linux kernel 2.6.25. It understood MAC (as everyone must), but also these: */
754 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
755 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
756 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
757 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
758 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
763 static void virtio_net_apply_guest_offloads(VirtIONet *n)
765 qemu_set_offload(qemu_get_queue(n->nic)->peer,
766 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
767 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
768 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
769 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
770 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
773 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
775 static const uint64_t guest_offloads_mask =
776 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
777 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
778 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
779 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
780 (1ULL << VIRTIO_NET_F_GUEST_UFO);
782 return guest_offloads_mask & features;
785 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
787 VirtIODevice *vdev = VIRTIO_DEVICE(n);
788 return virtio_net_guest_offloads_by_features(vdev->guest_features);
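/*
 * Failover (VIRTIO_NET_F_STANDBY) helpers: the primary is the -device
 * entry whose failover_pair_id matches this NIC's id.  It is located via
 * the "device" QemuOpts group and hot-added once the guest acknowledges
 * the STANDBY feature (see virtio_net_set_features).
 */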
791 static void failover_add_primary(VirtIONet *n, Error **errp)
796 if (n->primary_dev) {
800 opts = qemu_opts_find(qemu_find_opts("device"), n->primary_device_id);
802 n->primary_dev = qdev_device_add(opts, &err);
807 error_setg(errp, "Primary device not found");
808 error_append_hint(errp, "Virtio-net failover will not work. Make "
809 "sure primary device has parameter"
810 " failover_pair_id=<virtio-net-id>\n");
812 error_propagate(errp, err);
815 static int is_my_primary(void *opaque, QemuOpts *opts, Error **errp)
817 VirtIONet *n = opaque;
819 const char *standby_id = qemu_opt_get(opts, "failover_pair_id");
821 if (g_strcmp0(standby_id, n->netclient_name) == 0) {
822 n->primary_device_id = g_strdup(opts->id);
829 static DeviceState *virtio_net_find_primary(VirtIONet *n, Error **errp)
831 DeviceState *dev = NULL;
834 if (qemu_opts_foreach(qemu_find_opts("device"),
835 is_my_primary, n, &err)) {
837 error_propagate(errp, err);
840 if (n->primary_device_id) {
841 dev = qdev_find_recursive(sysbus_get_default(),
842 n->primary_device_id);
844 error_setg(errp, "Primary device id not found");
851 static DeviceState *virtio_connect_failover_devices(VirtIONet *n, Error **errp)
853 DeviceState *prim_dev = NULL;
856 prim_dev = virtio_net_find_primary(n, &err);
858 n->primary_device_id = g_strdup(prim_dev->id);
860 error_propagate(errp, err);
866 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
868 VirtIONet *n = VIRTIO_NET(vdev);
872 if (n->mtu_bypass_backend &&
873 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
874 features &= ~(1ULL << VIRTIO_NET_F_MTU);
877 virtio_net_set_multiqueue(n,
878 virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
879 virtio_has_feature(features, VIRTIO_NET_F_MQ));
881 virtio_net_set_mrg_rx_bufs(n,
882 virtio_has_feature(features,
883 VIRTIO_NET_F_MRG_RXBUF),
884 virtio_has_feature(features,
886 virtio_has_feature(features,
887 VIRTIO_NET_F_HASH_REPORT));
889 n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
890 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
891 n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
892 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
893 n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
895 if (n->has_vnet_hdr) {
896 n->curr_guest_offloads =
897 virtio_net_guest_offloads_by_features(features);
898 virtio_net_apply_guest_offloads(n);
901 for (i = 0; i < n->max_queues; i++) {
902 NetClientState *nc = qemu_get_subqueue(n->nic, i);
904 if (!get_vhost_net(nc->peer)) {
907 vhost_net_ack_features(get_vhost_net(nc->peer), features);
910 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
911 memset(n->vlans, 0, MAX_VLAN >> 3);
913 memset(n->vlans, 0xff, MAX_VLAN >> 3);
916 if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
917 qapi_event_send_failover_negotiated(n->netclient_name);
918 qatomic_set(&n->failover_primary_hidden, false);
919 failover_add_primary(n, &err);
921 n->primary_dev = virtio_connect_failover_devices(n, &err);
925 failover_add_primary(n, &err);
935 warn_report_err(err);
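/*
 * Control virtqueue command handlers.  Each request carries a
 * struct virtio_net_ctrl_hdr (class + command) followed by
 * command-specific data in the out descriptors, and is answered with a
 * single VIRTIO_NET_OK / VIRTIO_NET_ERR status byte in the in descriptor
 * (see virtio_net_handle_ctrl).
 */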
939 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
940 struct iovec *iov, unsigned int iov_cnt)
944 NetClientState *nc = qemu_get_queue(n->nic);
946 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
947 if (s != sizeof(on)) {
948 return VIRTIO_NET_ERR;
951 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
953 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
955 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
957 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
959 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
961 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
964 return VIRTIO_NET_ERR;
969 return VIRTIO_NET_OK;
972 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
973 struct iovec *iov, unsigned int iov_cnt)
975 VirtIODevice *vdev = VIRTIO_DEVICE(n);
979 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
980 return VIRTIO_NET_ERR;
983 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
984 if (s != sizeof(offloads)) {
985 return VIRTIO_NET_ERR;
988 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
989 uint64_t supported_offloads;
991 offloads = virtio_ldq_p(vdev, &offloads);
993 if (!n->has_vnet_hdr) {
994 return VIRTIO_NET_ERR;
997 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
998 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
999 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1000 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
1001 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
1003 supported_offloads = virtio_net_supported_guest_offloads(n);
1004 if (offloads & ~supported_offloads) {
1005 return VIRTIO_NET_ERR;
1008 n->curr_guest_offloads = offloads;
1009 virtio_net_apply_guest_offloads(n);
1011 return VIRTIO_NET_OK;
1013 return VIRTIO_NET_ERR;
1017 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
1018 struct iovec *iov, unsigned int iov_cnt)
1020 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1021 struct virtio_net_ctrl_mac mac_data;
1023 NetClientState *nc = qemu_get_queue(n->nic);
1025 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
1026 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
1027 return VIRTIO_NET_ERR;
1029 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
1030 assert(s == sizeof(n->mac));
1031 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
1032 rxfilter_notify(nc);
1034 return VIRTIO_NET_OK;
1037 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
1038 return VIRTIO_NET_ERR;
1042 int first_multi = 0;
1043 uint8_t uni_overflow = 0;
1044 uint8_t multi_overflow = 0;
1045 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
1047 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1048 sizeof(mac_data.entries));
1049 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1050 if (s != sizeof(mac_data.entries)) {
1053 iov_discard_front(&iov, &iov_cnt, s);
1055 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
1059 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
1060 s = iov_to_buf(iov, iov_cnt, 0, macs,
1061 mac_data.entries * ETH_ALEN);
1062 if (s != mac_data.entries * ETH_ALEN) {
1065 in_use += mac_data.entries;
1070 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
1072 first_multi = in_use;
1074 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1075 sizeof(mac_data.entries));
1076 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1077 if (s != sizeof(mac_data.entries)) {
1081 iov_discard_front(&iov, &iov_cnt, s);
1083 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
1087 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
1088 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
1089 mac_data.entries * ETH_ALEN);
1090 if (s != mac_data.entries * ETH_ALEN) {
1093 in_use += mac_data.entries;
1098 n->mac_table.in_use = in_use;
1099 n->mac_table.first_multi = first_multi;
1100 n->mac_table.uni_overflow = uni_overflow;
1101 n->mac_table.multi_overflow = multi_overflow;
1102 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
1104 rxfilter_notify(nc);
1106 return VIRTIO_NET_OK;
1110 return VIRTIO_NET_ERR;
1113 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
1114 struct iovec *iov, unsigned int iov_cnt)
1116 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1119 NetClientState *nc = qemu_get_queue(n->nic);
1121 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1122 vid = virtio_lduw_p(vdev, &vid);
1123 if (s != sizeof(vid)) {
1124 return VIRTIO_NET_ERR;
1127 if (vid >= MAX_VLAN)
1128 return VIRTIO_NET_ERR;
1130 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
1131 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
1132 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
1133 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
1135 return VIRTIO_NET_ERR;
1137 rxfilter_notify(nc);
1139 return VIRTIO_NET_OK;
1142 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
1143 struct iovec *iov, unsigned int iov_cnt)
1145 trace_virtio_net_handle_announce(n->announce_timer.round);
1146 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
1147 n->status & VIRTIO_NET_S_ANNOUNCE) {
1148 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
1149 if (n->announce_timer.round) {
1150 qemu_announce_timer_step(&n->announce_timer);
1152 return VIRTIO_NET_OK;
1154 return VIRTIO_NET_ERR;
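/*
 * RSS / hash report configuration: the guest supplies the enabled hash
 * types, an indirection table whose length must be a power of two and no
 * larger than VIRTIO_NET_RSS_MAX_TABLE_LEN, and a key of at most
 * VIRTIO_NET_RSS_MAX_KEY_SIZE bytes.  Any validation failure disables
 * RSS and is reported through trace_virtio_net_rss_error.
 */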
1158 static void virtio_net_disable_rss(VirtIONet *n)
1160 if (n->rss_data.enabled) {
1161 trace_virtio_net_rss_disable();
1163 n->rss_data.enabled = false;
1166 static uint16_t virtio_net_handle_rss(VirtIONet *n,
1168 unsigned int iov_cnt,
1171 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1172 struct virtio_net_rss_config cfg;
1173 size_t s, offset = 0, size_get;
1179 const char *err_msg = "";
1180 uint32_t err_value = 0;
1182 if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
1183 err_msg = "RSS is not negotiated";
1186 if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
1187 err_msg = "Hash report is not negotiated";
1190 size_get = offsetof(struct virtio_net_rss_config, indirection_table);
1191 s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
1192 if (s != size_get) {
1193 err_msg = "Short command buffer";
1194 err_value = (uint32_t)s;
1197 n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
1198 n->rss_data.indirections_len =
1199 virtio_lduw_p(vdev, &cfg.indirection_table_mask);
1200 n->rss_data.indirections_len++;
1202 n->rss_data.indirections_len = 1;
1204 if (!is_power_of_2(n->rss_data.indirections_len)) {
1205 err_msg = "Invalid size of indirection table";
1206 err_value = n->rss_data.indirections_len;
1209 if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
1210 err_msg = "Too large indirection table";
1211 err_value = n->rss_data.indirections_len;
1214 n->rss_data.default_queue = do_rss ?
1215 virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
1216 if (n->rss_data.default_queue >= n->max_queues) {
1217 err_msg = "Invalid default queue";
1218 err_value = n->rss_data.default_queue;
1222 size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
1223 g_free(n->rss_data.indirections_table);
1224 n->rss_data.indirections_table = g_malloc(size_get);
1225 if (!n->rss_data.indirections_table) {
1226 err_msg = "Can't allocate indirections table";
1227 err_value = n->rss_data.indirections_len;
1230 s = iov_to_buf(iov, iov_cnt, offset,
1231 n->rss_data.indirections_table, size_get);
1232 if (s != size_get) {
1233 err_msg = "Short indirection table buffer";
1234 err_value = (uint32_t)s;
1237 for (i = 0; i < n->rss_data.indirections_len; ++i) {
1238 uint16_t val = n->rss_data.indirections_table[i];
1239 n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
1242 size_get = sizeof(temp);
1243 s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
1244 if (s != size_get) {
1245 err_msg = "Can't get queues";
1246 err_value = (uint32_t)s;
1249 queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues;
1250 if (queues == 0 || queues > n->max_queues) {
1251 err_msg = "Invalid number of queues";
1255 if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
1256 err_msg = "Invalid key size";
1260 if (!temp.b && n->rss_data.hash_types) {
1261 err_msg = "No key provided";
1265 if (!temp.b && !n->rss_data.hash_types) {
1266 virtio_net_disable_rss(n);
1271 s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
1272 if (s != size_get) {
1273 err_msg = "Can get key buffer";
1274 err_value = (uint32_t)s;
1277 n->rss_data.enabled = true;
1278 trace_virtio_net_rss_enable(n->rss_data.hash_types,
1279 n->rss_data.indirections_len,
1283 trace_virtio_net_rss_error(err_msg, err_value);
1284 virtio_net_disable_rss(n);
1288 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
1289 struct iovec *iov, unsigned int iov_cnt)
1291 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1294 virtio_net_disable_rss(n);
1295 if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
1296 queues = virtio_net_handle_rss(n, iov, iov_cnt, false);
1297 return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
1299 if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
1300 queues = virtio_net_handle_rss(n, iov, iov_cnt, true);
1301 } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
1302 struct virtio_net_ctrl_mq mq;
1304 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
1305 return VIRTIO_NET_ERR;
1307 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
1308 if (s != sizeof(mq)) {
1309 return VIRTIO_NET_ERR;
1311 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
1314 return VIRTIO_NET_ERR;
1317 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1318 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1319 queues > n->max_queues ||
1321 return VIRTIO_NET_ERR;
1324 n->curr_queues = queues;
1325 /* stop the backend before changing the number of queues to avoid handling a disabled queue */
1327 virtio_net_set_status(vdev, vdev->status);
1328 virtio_net_set_queues(n);
1330 return VIRTIO_NET_OK;
1333 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1335 VirtIONet *n = VIRTIO_NET(vdev);
1336 struct virtio_net_ctrl_hdr ctrl;
1337 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1338 VirtQueueElement *elem;
1340 struct iovec *iov, *iov2;
1341 unsigned int iov_cnt;
1344 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1348 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
1349 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
1350 virtio_error(vdev, "virtio-net ctrl missing headers");
1351 virtqueue_detach_element(vq, elem, 0);
1356 iov_cnt = elem->out_num;
1357 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
1358 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
1359 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
1360 if (s != sizeof(ctrl)) {
1361 status = VIRTIO_NET_ERR;
1362 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
1363 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
1364 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1365 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
1366 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1367 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
1368 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1369 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
1370 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
1371 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
1372 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1373 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
1376 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1377 assert(s == sizeof(status));
1379 virtqueue_push(vq, elem, sizeof(status));
1380 virtio_notify(vdev, vq);
1388 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1390 VirtIONet *n = VIRTIO_NET(vdev);
1391 int queue_index = vq2q(virtio_get_queue_index(vq));
1393 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1396 static bool virtio_net_can_receive(NetClientState *nc)
1398 VirtIONet *n = qemu_get_nic_opaque(nc);
1399 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1400 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1402 if (!vdev->vm_running) {
1406 if (nc->queue_index >= n->curr_queues) {
1410 if (!virtio_queue_ready(q->rx_vq) ||
1411 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1418 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1420 VirtIONet *n = q->n;
1421 if (virtio_queue_empty(q->rx_vq) ||
1422 (n->mergeable_rx_bufs &&
1423 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1424 virtio_queue_set_notification(q->rx_vq, 1);
1426 /* To avoid a race condition where the guest has made some buffers
1427 * available after the above check but before notification was
1428 * enabled, check for available buffers again.
1430 if (virtio_queue_empty(q->rx_vq) ||
1431 (n->mergeable_rx_bufs &&
1432 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1437 virtio_queue_set_notification(q->rx_vq, 0);
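/*
 * virtio_net_hdr fields are guest-endian; when the backend could not be
 * switched to the guest's endianness (needs_vnet_hdr_swap), the
 * multi-byte fields are byte-swapped in software instead.
 */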
1441 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1443 virtio_tswap16s(vdev, &hdr->hdr_len);
1444 virtio_tswap16s(vdev, &hdr->gso_size);
1445 virtio_tswap16s(vdev, &hdr->csum_start);
1446 virtio_tswap16s(vdev, &hdr->csum_offset);
1449 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1450 * it never finds out that the packets don't have valid checksums. This
1451 * causes dhclient to get upset. Fedora's carried a patch for ages to
1452 * fix this with Xen but it hasn't appeared in an upstream release of dhclient yet.
1455 * To avoid breaking existing guests, we catch udp packets and add
1456 * checksums. This is terrible but it's better than hacking the guest kernels.
1459 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1460 * we should provide a mechanism to disable it to avoid polluting the host cache. */
1463 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1464 uint8_t *buf, size_t size)
1466 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1467 (size > 27 && size < 1500) && /* normal sized MTU */
1468 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1469 (buf[23] == 17) && /* ip.protocol == UDP */
1470 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1471 net_checksum_calculate(buf, size);
1472 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
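/*
 * Write the virtio-net header for a packet delivered to the guest:
 * either forward (and, if needed, byte-swap) the header produced by a
 * vnet-hdr capable peer, or synthesize an empty GSO_NONE header.
 */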
1476 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1477 const void *buf, size_t size)
1479 if (n->has_vnet_hdr) {
1480 /* FIXME this cast is evil */
1481 void *wbuf = (void *)buf;
1482 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1483 size - n->host_hdr_len);
1485 if (n->needs_vnet_hdr_swap) {
1486 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1488 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1490 struct virtio_net_hdr hdr = {
1492 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1494 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1498 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1500 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1501 static const uint8_t vlan[] = {0x81, 0x00};
1502 uint8_t *ptr = (uint8_t *)buf;
1508 ptr += n->host_hdr_len;
1510 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1511 int vid = lduw_be_p(ptr + 14) & 0xfff;
1512 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1516 if (ptr[0] & 1) { // multicast
1517 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1519 } else if (n->nomulti) {
1521 } else if (n->allmulti || n->mac_table.multi_overflow) {
1525 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1526 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1533 } else if (n->alluni || n->mac_table.uni_overflow) {
1535 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1539 for (i = 0; i < n->mac_table.first_multi; i++) {
1540 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1549 static uint8_t virtio_net_get_hash_type(bool isip4,
1556 if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
1557 return NetPktRssIpV4Tcp;
1559 if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
1560 return NetPktRssIpV4Udp;
1562 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1563 return NetPktRssIpV4;
1566 uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
1567 VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
1569 if (istcp && (types & mask)) {
1570 return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
1571 NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
1573 mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
1574 if (isudp && (types & mask)) {
1575 return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
1576 NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
1578 mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
1580 return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
1581 NetPktRssIpV6Ex : NetPktRssIpV6;
1587 static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report,
1590 struct virtio_net_hdr_v1_hash *hdr = (void *)buf;
1591 hdr->hash_value = hash;
1592 hdr->hash_report = report;
1595 static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
1598 VirtIONet *n = qemu_get_nic_opaque(nc);
1599 unsigned int index = nc->queue_index, new_index = index;
1600 struct NetRxPkt *pkt = n->rx_pkt;
1601 uint8_t net_hash_type;
1603 bool isip4, isip6, isudp, istcp;
1604 static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
1605 VIRTIO_NET_HASH_REPORT_IPv4,
1606 VIRTIO_NET_HASH_REPORT_TCPv4,
1607 VIRTIO_NET_HASH_REPORT_TCPv6,
1608 VIRTIO_NET_HASH_REPORT_IPv6,
1609 VIRTIO_NET_HASH_REPORT_IPv6_EX,
1610 VIRTIO_NET_HASH_REPORT_TCPv6_EX,
1611 VIRTIO_NET_HASH_REPORT_UDPv4,
1612 VIRTIO_NET_HASH_REPORT_UDPv6,
1613 VIRTIO_NET_HASH_REPORT_UDPv6_EX
1616 net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
1617 size - n->host_hdr_len);
1618 net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
1619 if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
1620 istcp = isudp = false;
1622 if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
1623 istcp = isudp = false;
1625 net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
1626 n->rss_data.hash_types);
1627 if (net_hash_type > NetPktRssIpV6UdpEx) {
1628 if (n->rss_data.populate_hash) {
1629 virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0);
1631 return n->rss_data.redirect ? n->rss_data.default_queue : -1;
1634 hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
1636 if (n->rss_data.populate_hash) {
1637 virtio_set_packet_hash(buf, reports[net_hash_type], hash);
1640 if (n->rss_data.redirect) {
1641 new_index = hash & (n->rss_data.indirections_len - 1);
1642 new_index = n->rss_data.indirections_table[new_index];
1645 return (index == new_index) ? -1 : new_index;
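/*
 * Deliver one packet to the guest.  With RSS enabled the packet may first
 * be redirected to the queue chosen by virtio_net_process_rss(); it is
 * then copied into as many RX descriptor chains as needed (when
 * mergeable RX buffers are in use) and the used ring is flushed.
 */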
1648 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1649 size_t size, bool no_rss)
1651 VirtIONet *n = qemu_get_nic_opaque(nc);
1652 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1653 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1654 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1655 struct virtio_net_hdr_mrg_rxbuf mhdr;
1656 unsigned mhdr_cnt = 0;
1657 size_t offset, i, guest_offset;
1659 if (!virtio_net_can_receive(nc)) {
1663 if (!no_rss && n->rss_data.enabled) {
1664 int index = virtio_net_process_rss(nc, buf, size);
1666 NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
1667 return virtio_net_receive_rcu(nc2, buf, size, true);
1671 /* hdr_len refers to the header we supply to the guest */
1672 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1676 if (!receive_filter(n, buf, size))
1681 while (offset < size) {
1682 VirtQueueElement *elem;
1684 const struct iovec *sg;
1688 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1691 virtio_error(vdev, "virtio-net unexpected empty queue: "
1692 "i %zd mergeable %d offset %zd, size %zd, "
1693 "guest hdr len %zd, host hdr len %zd "
1694 "guest features 0x%" PRIx64,
1695 i, n->mergeable_rx_bufs, offset, size,
1696 n->guest_hdr_len, n->host_hdr_len,
1697 vdev->guest_features);
1702 if (elem->in_num < 1) {
1704 "virtio-net receive queue contains no in buffers");
1705 virtqueue_detach_element(q->rx_vq, elem, 0);
1712 assert(offset == 0);
1713 if (n->mergeable_rx_bufs) {
1714 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1716 offsetof(typeof(mhdr), num_buffers),
1717 sizeof(mhdr.num_buffers));
1720 receive_header(n, sg, elem->in_num, buf, size);
1721 if (n->rss_data.populate_hash) {
1722 offset = sizeof(mhdr);
1723 iov_from_buf(sg, elem->in_num, offset,
1724 buf + offset, n->host_hdr_len - sizeof(mhdr));
1726 offset = n->host_hdr_len;
1727 total += n->guest_hdr_len;
1728 guest_offset = n->guest_hdr_len;
1733 /* copy in packet. ugh */
1734 len = iov_from_buf(sg, elem->in_num, guest_offset,
1735 buf + offset, size - offset);
1738 /* If buffers can't be merged, at this point we
1739 * must have consumed the complete packet.
1740 * Otherwise, drop it. */
1741 if (!n->mergeable_rx_bufs && offset < size) {
1742 virtqueue_unpop(q->rx_vq, elem, total);
1747 /* signal other side */
1748 virtqueue_fill(q->rx_vq, elem, total, i++);
1753 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1754 iov_from_buf(mhdr_sg, mhdr_cnt,
1756 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1759 virtqueue_flush(q->rx_vq, i);
1760 virtio_notify(vdev, q->rx_vq);
1765 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
1768 RCU_READ_LOCK_GUARD();
1770 return virtio_net_receive_rcu(nc, buf, size, false);
1773 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
1775 VirtioNetRscUnit *unit)
1778 struct ip_header *ip;
1780 ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
1781 + sizeof(struct eth_header));
1782 unit->ip = (void *)ip;
1783 ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
1784 unit->ip_plen = &ip->ip_len;
1785 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
1786 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1787 unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
1790 static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
1792 VirtioNetRscUnit *unit)
1794 struct ip6_header *ip6;
1796 ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
1797 + sizeof(struct eth_header));
1799 unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
1800 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
1801 + sizeof(struct ip6_header));
1802 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
1804 /* There is a difference between the payload length in IPv4 and IPv6:
1805    the IP header is excluded in IPv6 */
1806 unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
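/*
 * Flush one cached segment to the guest.  If anything was coalesced into
 * it, the virtio header is rewritten with VIRTIO_NET_HDR_F_RSC_INFO plus
 * the segment and duplicate-ACK counters before the buffer goes down the
 * normal receive path.
 */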
1809 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
1810 VirtioNetRscSeg *seg)
1813 struct virtio_net_hdr_v1 *h;
1815 h = (struct virtio_net_hdr_v1 *)seg->buf;
1817 h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
1819 if (seg->is_coalesced) {
1820 h->rsc.segments = seg->packets;
1821 h->rsc.dup_acks = seg->dup_ack;
1822 h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
1823 if (chain->proto == ETH_P_IP) {
1824 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1826 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1830 ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
1831 QTAILQ_REMOVE(&chain->buffers, seg, next);
1838 static void virtio_net_rsc_purge(void *opq)
1840 VirtioNetRscSeg *seg, *rn;
1841 VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
1843 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
1844 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
1845 chain->stat.purge_failed++;
1850 chain->stat.timer++;
1851 if (!QTAILQ_EMPTY(&chain->buffers)) {
1852 timer_mod(chain->drain_timer,
1853 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
1857 static void virtio_net_rsc_cleanup(VirtIONet *n)
1859 VirtioNetRscChain *chain, *rn_chain;
1860 VirtioNetRscSeg *seg, *rn_seg;
1862 QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
1863 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
1864 QTAILQ_REMOVE(&chain->buffers, seg, next);
1869 timer_del(chain->drain_timer);
1870 timer_free(chain->drain_timer);
1871 QTAILQ_REMOVE(&n->rsc_chains, chain, next);
1876 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
1878 const uint8_t *buf, size_t size)
1881 VirtioNetRscSeg *seg;
1883 hdr_len = chain->n->guest_hdr_len;
1884 seg = g_malloc(sizeof(VirtioNetRscSeg));
1885 seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
1886 + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
1887 memcpy(seg->buf, buf, size);
1891 seg->is_coalesced = 0;
1894 QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
1895 chain->stat.cache++;
1897 switch (chain->proto) {
1899 virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
1902 virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
1905 g_assert_not_reached();
1909 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
1910 VirtioNetRscSeg *seg,
1912 struct tcp_header *n_tcp,
1913 struct tcp_header *o_tcp)
1915 uint32_t nack, oack;
1916 uint16_t nwin, owin;
1918 nack = htonl(n_tcp->th_ack);
1919 nwin = htons(n_tcp->th_win);
1920 oack = htonl(o_tcp->th_ack);
1921 owin = htons(o_tcp->th_win);
1923 if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
1924 chain->stat.ack_out_of_win++;
1926 } else if (nack == oack) {
1927 /* duplicated ack or window probe */
1929 /* duplicated ack, add dup ack count due to whql test up to 1 */
1930 chain->stat.dup_ack++;
1933 /* Coalesce window update */
1934 o_tcp->th_win = n_tcp->th_win;
1935 chain->stat.win_update++;
1936 return RSC_COALESCE;
1939 /* pure ack, go to 'C', finalize */
1940 chain->stat.pure_ack++;
1945 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
1946 VirtioNetRscSeg *seg,
1948 VirtioNetRscUnit *n_unit)
1952 uint32_t nseq, oseq;
1953 VirtioNetRscUnit *o_unit;
1955 o_unit = &seg->unit;
1956 o_ip_len = htons(*o_unit->ip_plen);
1957 nseq = htonl(n_unit->tcp->th_seq);
1958 oseq = htonl(o_unit->tcp->th_seq);
1960 /* out of order or retransmitted. */
1961 if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
1962 chain->stat.data_out_of_win++;
1966 data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
1968 if ((o_unit->payload == 0) && n_unit->payload) {
1969 /* From no payload to payload: the normal case, not a dup ack etc. */
1970 chain->stat.data_after_pure_ack++;
1973 return virtio_net_rsc_handle_ack(chain, seg, buf,
1974 n_unit->tcp, o_unit->tcp);
1976 } else if ((nseq - oseq) != o_unit->payload) {
1977 /* Not a consistent packet, out of order */
1978 chain->stat.data_out_of_order++;
1982 if ((o_ip_len + n_unit->payload) > chain->max_payload) {
1983 chain->stat.over_size++;
1987 /* Here comes the right data; the payload length field differs between v4 and v6,
1988    so use the field value to update it and record the new data length */
1989 o_unit->payload += n_unit->payload; /* update new data len */
1991 /* update field in ip header */
1992 *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
1994 /* Bring the 'PUSH' flag over; the WHQL test guide says 'PUSH' can be coalesced
1995    for a Windows guest, while this may change the behavior for a Linux
1996    guest (only if it uses the RSC feature). */
1997 o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
1999 o_unit->tcp->th_ack = n_unit->tcp->th_ack;
2000 o_unit->tcp->th_win = n_unit->tcp->th_win;
2002 memmove(seg->buf + seg->size, data, n_unit->payload);
2003 seg->size += n_unit->payload;
2005 chain->stat.coalesced++;
2006 return RSC_COALESCE;
2010 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2011 VirtioNetRscSeg *seg,
2012 const uint8_t *buf, size_t size,
2013 VirtioNetRscUnit *unit)
2015 struct ip_header *ip1, *ip2;
2017 ip1 = (struct ip_header *)(unit->ip);
2018 ip2 = (struct ip_header *)(seg->unit.ip);
2019 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2020 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2021 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2022 chain->stat.no_match++;
2023 return RSC_NO_MATCH;
2026 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2029 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2030 VirtioNetRscSeg *seg,
2031 const uint8_t *buf, size_t size,
2032 VirtioNetRscUnit *unit)
2034 struct ip6_header *ip1, *ip2;
2036 ip1 = (struct ip6_header *)(unit->ip);
2037 ip2 = (struct ip6_header *)(seg->unit.ip);
2038 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2039 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2040 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2041 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2042 chain->stat.no_match++;
2043 return RSC_NO_MATCH;
2046 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2049 /* Packets with 'SYN' should bypass; packets with other control flags should be sent
2050  * after draining, to prevent out-of-order delivery */
2051 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
2052 struct tcp_header *tcp)
2057 tcp_flag = htons(tcp->th_offset_flags);
2058 tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
2059 tcp_flag &= VIRTIO_NET_TCP_FLAG;
2060 if (tcp_flag & TH_SYN) {
2061 chain->stat.tcp_syn++;
2065 if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
2066 chain->stat.tcp_ctrl_drain++;
2070 if (tcp_hdr > sizeof(struct tcp_header)) {
2071 chain->stat.tcp_all_opt++;
2075 return RSC_CANDIDATE;
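/*
 * Try to merge the incoming packet into one of the chain's cached
 * segments.  An empty cache or a miss caches the packet and (re)arms the
 * purge timer; RSC_FINAL flushes the matching segment and forwards the
 * current packet unmodified.
 */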
2078 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
2080 const uint8_t *buf, size_t size,
2081 VirtioNetRscUnit *unit)
2084 VirtioNetRscSeg *seg, *nseg;
2086 if (QTAILQ_EMPTY(&chain->buffers)) {
2087 chain->stat.empty_cache++;
2088 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2089 timer_mod(chain->drain_timer,
2090 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2094 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2095 if (chain->proto == ETH_P_IP) {
2096 ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
2098 ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
2101 if (ret == RSC_FINAL) {
2102 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2104 chain->stat.final_failed++;
2108 /* Send current packet */
2109 return virtio_net_do_receive(nc, buf, size);
2110 } else if (ret == RSC_NO_MATCH) {
2113 /* Coalesced; set the coalesced flag so the checksum is recalculated for IPv4 */
2114 seg->is_coalesced = 1;
2119 chain->stat.no_match_cache++;
2120 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2124 /* Drain a connection's data; this is to avoid out-of-order segments */
2125 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
2127 const uint8_t *buf, size_t size,
2128 uint16_t ip_start, uint16_t ip_size,
2131 VirtioNetRscSeg *seg, *nseg;
2132 uint32_t ppair1, ppair2;
2134 ppair1 = *(uint32_t *)(buf + tcp_port);
2135 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2136 ppair2 = *(uint32_t *)(seg->buf + tcp_port);
2137 if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
2138 || (ppair1 != ppair2)) {
2141 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2142 chain->stat.drain_failed++;
2148 return virtio_net_do_receive(nc, buf, size);
2151 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
2152 struct ip_header *ip,
2153 const uint8_t *buf, size_t size)
2157 /* Not an ipv4 packet */
2158 if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
2159 chain->stat.ip_option++;
2163 /* Don't handle packets with IP options */
2164 if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
2165 chain->stat.ip_option++;
2169 if (ip->ip_p != IPPROTO_TCP) {
2170 chain->stat.bypass_not_tcp++;
2174 /* Don't handle fragmented IP packets */
2175 if (!(htons(ip->ip_off) & IP_DF)) {
2176 chain->stat.ip_frag++;
2180 /* Don't handle packets with the ECN flag set */
2181 if (IPTOS_ECN(ip->ip_tos)) {
2182 chain->stat.ip_ecn++;
2186 ip_len = htons(ip->ip_len);
2187 if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
2188 || ip_len > (size - chain->n->guest_hdr_len -
2189 sizeof(struct eth_header))) {
2190 chain->stat.ip_hacked++;
2194 return RSC_CANDIDATE;
2197 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
2199 const uint8_t *buf, size_t size)
2203 VirtioNetRscUnit unit;
2205 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2207 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
2208 + sizeof(struct tcp_header))) {
2209 chain->stat.bypass_not_tcp++;
2210 return virtio_net_do_receive(nc, buf, size);
2213 virtio_net_rsc_extract_unit4(chain, buf, &unit);
2214 if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
2216 return virtio_net_do_receive(nc, buf, size);
2219 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2220 if (ret == RSC_BYPASS) {
2221 return virtio_net_do_receive(nc, buf, size);
2222 } else if (ret == RSC_FINAL) {
2223 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2224 ((hdr_len + sizeof(struct eth_header)) + 12),
2225 VIRTIO_NET_IP4_ADDR_SIZE,
2226 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
2229 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2232 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
2233 struct ip6_header *ip6,
2234 const uint8_t *buf, size_t size)
2238 if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
2239 != IP_HEADER_VERSION_6) {
2243 /* Both options and the protocol are checked by this single test */
2244 if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
2245 chain->stat.bypass_not_tcp++;
2249 ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2250 if (ip_len < sizeof(struct tcp_header) ||
2251 ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
2252 - sizeof(struct ip6_header))) {
2253 chain->stat.ip_hacked++;
2257 /* Don't handle packets with the ECN flag set */
2258 if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
2259 chain->stat.ip_ecn++;
2263 return RSC_CANDIDATE;
2266 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
2267 const uint8_t *buf, size_t size)
2271 VirtioNetRscChain *chain;
2272 VirtioNetRscUnit unit;
2274 chain = (VirtioNetRscChain *)opq;
2275 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2277 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
2278 + sizeof(struct tcp_header))) {
2279 return virtio_net_do_receive(nc, buf, size);
2282 virtio_net_rsc_extract_unit6(chain, buf, &unit);
2283 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
2284 unit.ip, buf, size)) {
2285 return virtio_net_do_receive(nc, buf, size);
2288 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2289 if (ret == RSC_BYPASS) {
2290 return virtio_net_do_receive(nc, buf, size);
2291 } else if (ret == RSC_FINAL) {
2292 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2293 ((hdr_len + sizeof(struct eth_header)) + 8),
2294 VIRTIO_NET_IP6_ADDR_SIZE,
2295 hdr_len + sizeof(struct eth_header)
2296 + sizeof(struct ip6_header));
2299 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
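/*
 * Look up (or lazily create) the per-protocol coalescing chain.  Only
 * ETH_P_IP and ETH_P_IPV6 get a chain; a new chain records the
 * protocol-specific maximum payload and GSO type, starts with an empty
 * buffer list, and creates a host-clock timer that runs
 * virtio_net_rsc_purge().
 */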
2302 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2306 VirtioNetRscChain *chain;
2308 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2312 QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2313 if (chain->proto == proto) {
2318 chain = g_malloc(sizeof(*chain));
2320 chain->proto = proto;
2321 if (proto == (uint16_t)ETH_P_IP) {
2322 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2323 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2325 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2326 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2328 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
2329 virtio_net_rsc_purge, chain);
2330 memset(&chain->stat, 0, sizeof(chain->stat));
2332 QTAILQ_INIT(&chain->buffers);
2333 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
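/*
 * Top-level RSC dispatcher: anything shorter than the vnet header plus an
 * Ethernet header, or whose EtherType has RSC disabled, falls back to the
 * plain virtio_net_do_receive() path.
 */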
2338 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
2343 VirtioNetRscChain *chain;
2344 struct eth_header *eth;
2347 n = qemu_get_nic_opaque(nc);
2348 if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
2349 return virtio_net_do_receive(nc, buf, size);
2352 eth = (struct eth_header *)(buf + n->guest_hdr_len);
2353 proto = htons(eth->h_proto);
2355 chain = virtio_net_rsc_lookup_chain(n, nc, proto);
2357 chain->stat.received++;
2358 if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
2359 return virtio_net_rsc_receive4(chain, nc, buf, size);
2360 } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
2361 return virtio_net_rsc_receive6(chain, nc, buf, size);
2364 return virtio_net_do_receive(nc, buf, size);
2367 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2370 VirtIONet *n = qemu_get_nic_opaque(nc);
2371 if ((n->rsc4_enabled || n->rsc6_enabled)) {
2372 return virtio_net_rsc_receive(nc, buf, size);
2374 return virtio_net_do_receive(nc, buf, size);
2378 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
2380 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
2382 VirtIONet *n = qemu_get_nic_opaque(nc);
2383 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
2384 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2386 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
2387 virtio_notify(vdev, q->tx_vq);
2389 g_free(q->async_tx.elem);
2390 q->async_tx.elem = NULL;
2392 virtio_queue_set_notification(q->tx_vq, 1);
2393 virtio_net_flush_tx(q);
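/*
 * Drain as many guest TX buffers as possible.  Sketch of the loop below:
 * pop an element, byte-swap and/or shrink the vnet header to what the
 * host backend expects when needed, hand the iovec to
 * qemu_sendv_packet_async(), and stop early either when the backend
 * would block (the element is parked in async_tx and notifications stay
 * off until virtio_net_tx_complete() runs) or once tx_burst packets have
 * been flushed.
 */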
2397 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2399 VirtIONet *n = q->n;
2400 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2401 VirtQueueElement *elem;
2402 int32_t num_packets = 0;
2403 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2404 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2408 if (q->async_tx.elem) {
2409 virtio_queue_set_notification(q->tx_vq, 0);
2415 unsigned int out_num;
2416 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2417 struct virtio_net_hdr_mrg_rxbuf mhdr;
2419 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2424 out_num = elem->out_num;
2425 out_sg = elem->out_sg;
2427 virtio_error(vdev, "virtio-net header not in first element");
2428 virtqueue_detach_element(q->tx_vq, elem, 0);
2433 if (n->has_vnet_hdr) {
2434 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
2436 virtio_error(vdev, "virtio-net header incorrect");
2437 virtqueue_detach_element(q->tx_vq, elem, 0);
2441 if (n->needs_vnet_hdr_swap) {
2442 virtio_net_hdr_swap(vdev, (void *) &mhdr);
2443 sg2[0].iov_base = &mhdr;
2444 sg2[0].iov_len = n->guest_hdr_len;
2445 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
2447 n->guest_hdr_len, -1);
2448 if (out_num == VIRTQUEUE_MAX_SIZE) {
2456 * If the host wants to see the guest header as is, we can
2457 * pass it on unchanged. Otherwise, copy just the parts
2458 * that the host is interested in.
2460 assert(n->host_hdr_len <= n->guest_hdr_len);
2461 if (n->host_hdr_len != n->guest_hdr_len) {
2462 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2464 0, n->host_hdr_len);
2465 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2467 n->guest_hdr_len, -1);
2472 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2473 out_sg, out_num, virtio_net_tx_complete);
2475 virtio_queue_set_notification(q->tx_vq, 0);
2476 q->async_tx.elem = elem;
2481 virtqueue_push(q->tx_vq, elem, 0);
2482 virtio_notify(vdev, q->tx_vq);
2485 if (++num_packets >= n->tx_burst) {
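/*
 * TX handler used with tx=timer: the first kick arms tx_timer for
 * tx_timeout ns and turns queue notifications off; a further kick while
 * the timer is pending cancels it and flushes immediately.  The deferred
 * flush itself happens in virtio_net_tx_timer() below.
 */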
2492 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2494 VirtIONet *n = VIRTIO_NET(vdev);
2495 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2497 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2498 virtio_net_drop_tx_queue_data(vdev, vq);
2502 /* This happens when device was stopped but VCPU wasn't. */
2503 if (!vdev->vm_running) {
2508 if (q->tx_waiting) {
2509 virtio_queue_set_notification(vq, 1);
2510 timer_del(q->tx_timer);
2512 if (virtio_net_flush_tx(q) == -EINVAL) {
2516 timer_mod(q->tx_timer,
2517 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2519 virtio_queue_set_notification(vq, 0);
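/*
 * TX handler used with tx=bh (the default): mask further notifications
 * and defer the flush to a bottom half so the actual transmission runs
 * outside the guest's kick path.
 */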
2523 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2525 VirtIONet *n = VIRTIO_NET(vdev);
2526 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2528 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2529 virtio_net_drop_tx_queue_data(vdev, vq);
2533 if (unlikely(q->tx_waiting)) {
2537 /* This happens when device was stopped but VCPU wasn't. */
2538 if (!vdev->vm_running) {
2541 virtio_queue_set_notification(vq, 0);
2542 qemu_bh_schedule(q->tx_bh);
2545 static void virtio_net_tx_timer(void *opaque)
2547 VirtIONetQueue *q = opaque;
2548 VirtIONet *n = q->n;
2549 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2550 /* This happens when device was stopped but BH wasn't. */
2551 if (!vdev->vm_running) {
2552 /* Make sure tx waiting is set, so we'll run when restarted. */
2553 assert(q->tx_waiting);
2559 /* Just in case the driver is not ready any more */
2560 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2564 virtio_queue_set_notification(q->tx_vq, 1);
2565 virtio_net_flush_tx(q);
2568 static void virtio_net_tx_bh(void *opaque)
2570 VirtIONetQueue *q = opaque;
2571 VirtIONet *n = q->n;
2572 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2575 /* This happens when device was stopped but BH wasn't. */
2576 if (!vdev->vm_running) {
2577 /* Make sure tx waiting is set, so we'll run when restarted. */
2578 assert(q->tx_waiting);
2584 /* Just in case the driver is not ready any more */
2585 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2589 ret = virtio_net_flush_tx(q);
2590 if (ret == -EBUSY || ret == -EINVAL) {
2591 return; /* Notification re-enable handled by tx_complete or device broken */
2595 /* If we flush a full burst of packets, assume there are
2596 * more coming and immediately reschedule */
2597 if (ret >= n->tx_burst) {
2598 qemu_bh_schedule(q->tx_bh);
2603 /* If less than a full burst, re-enable notification and flush
2604 * anything that may have come in while we weren't looking. If
2605 * we find something, assume the guest is still active and reschedule */
2606 virtio_queue_set_notification(q->tx_vq, 1);
2607 ret = virtio_net_flush_tx(q);
2608 if (ret == -EINVAL) {
2610 } else if (ret > 0) {
2611 virtio_queue_set_notification(q->tx_vq, 0);
2612 qemu_bh_schedule(q->tx_bh);
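/*
 * Create the RX/TX virtqueue pair for one queue index, sized by the
 * rx_queue_size/tx_queue_size properties, and attach either a TX timer
 * or a TX bottom half depending on the tx= option.
 */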
2617 static void virtio_net_add_queue(VirtIONet *n, int index)
2619 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2621 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2622 virtio_net_handle_rx);
2624 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2625 n->vqs[index].tx_vq =
2626 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2627 virtio_net_handle_tx_timer);
2628 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2629 virtio_net_tx_timer,
2632 n->vqs[index].tx_vq =
2633 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2634 virtio_net_handle_tx_bh);
2635 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
2638 n->vqs[index].tx_waiting = 0;
2639 n->vqs[index].n = n;
2642 static void virtio_net_del_queue(VirtIONet *n, int index)
2644 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2645 VirtIONetQueue *q = &n->vqs[index];
2646 NetClientState *nc = qemu_get_subqueue(n->nic, index);
2648 qemu_purge_queued_packets(nc);
2650 virtio_del_queue(vdev, index * 2);
2652 timer_del(q->tx_timer);
2653 timer_free(q->tx_timer);
2656 qemu_bh_delete(q->tx_bh);
2660 virtio_del_queue(vdev, index * 2 + 1);
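/*
 * Virtqueue layout assumed throughout: for pair i, queue 2 * i is RX and
 * queue 2 * i + 1 is TX, with the 64-entry control queue always last, so
 * a device with max queue pairs exposes 2 * max + 1 virtqueues.  The
 * control queue is deleted first and re-added last so that it keeps its
 * position at the end across a resize.
 */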
2663 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
2665 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2666 int old_num_queues = virtio_get_num_queues(vdev);
2667 int new_num_queues = new_max_queues * 2 + 1;
2670 assert(old_num_queues >= 3);
2671 assert(old_num_queues % 2 == 1);
2673 if (old_num_queues == new_num_queues) {
2678 * We always need to remove and add ctrl vq if
2679 * old_num_queues != new_num_queues. Remove ctrl_vq first,
2680 * and then we only enter one of the following two loops.
2682 virtio_del_queue(vdev, old_num_queues - 1);
2684 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2685 /* new_num_queues < old_num_queues */
2686 virtio_net_del_queue(n, i / 2);
2689 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2690 /* new_num_queues > old_num_queues */
2691 virtio_net_add_queue(n, i / 2);
2694 /* add ctrl_vq last */
2695 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
2698 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2700 int max = multiqueue ? n->max_queues : 1;
2702 n->multiqueue = multiqueue;
2703 virtio_net_change_num_queues(n, max);
2705 virtio_net_set_queues(n);
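/*
 * Device-side post-load fixups: re-derive the mergeable-RX/hash-report
 * layout from the negotiated features, drop a migrated MAC table that no
 * longer fits, recompute first_multi, propagate the migrated link state
 * to every subqueue peer, restart a pending announce round, and trace
 * whether RSS came back enabled.
 */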
2708 static int virtio_net_post_load_device(void *opaque, int version_id)
2710 VirtIONet *n = opaque;
2711 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2714 trace_virtio_net_post_load_device();
2715 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
2716 virtio_vdev_has_feature(vdev,
2717 VIRTIO_F_VERSION_1),
2718 virtio_vdev_has_feature(vdev,
2719 VIRTIO_NET_F_HASH_REPORT));
2721 /* MAC_TABLE_ENTRIES may be different from the saved image */
2722 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
2723 n->mac_table.in_use = 0;
2726 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
2727 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
2731 * curr_guest_offloads will later be overwritten by the
2732 * virtio_set_features_nocheck call done from virtio_load.
2733 * Here we make sure it is preserved, to be restored accordingly
2734 * in the virtio_net_post_load_virtio callback.
2736 n->saved_guest_offloads = n->curr_guest_offloads;
2738 virtio_net_set_queues(n);
2740 /* Find the first multicast entry in the saved MAC filter */
2741 for (i = 0; i < n->mac_table.in_use; i++) {
2742 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
2746 n->mac_table.first_multi = i;
2748 /* nc.link_down can't be migrated, so infer link_down according
2749 * to link status bit in n->status */
2750 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
2751 for (i = 0; i < n->max_queues; i++) {
2752 qemu_get_subqueue(n->nic, i)->link_down = link_down;
2755 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
2756 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
2757 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
2759 virtio_net_announce_timer, n);
2760 if (n->announce_timer.round) {
2761 timer_mod(n->announce_timer.tm,
2762 qemu_clock_get_ms(n->announce_timer.type));
2764 qemu_announce_timer_del(&n->announce_timer, false);
2768 if (n->rss_data.enabled) {
2769 trace_virtio_net_rss_enable(n->rss_data.hash_types,
2770 n->rss_data.indirections_len,
2771 sizeof(n->rss_data.key));
2773 trace_virtio_net_rss_disable();
2778 static int virtio_net_post_load_virtio(VirtIODevice *vdev)
2780 VirtIONet *n = VIRTIO_NET(vdev);
2782 * The actual needed state is now in saved_guest_offloads,
2783 * see virtio_net_post_load_device for details.
2784 * Restore it and apply the desired offloads.
2786 n->curr_guest_offloads = n->saved_guest_offloads;
2787 if (peer_has_vnet_hdr(n)) {
2788 virtio_net_apply_guest_offloads(n);
2794 /* tx_waiting field of a VirtIONetQueue */
2795 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
2796 .name = "virtio-net-queue-tx_waiting",
2797 .fields = (VMStateField[]) {
2798 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
2799 VMSTATE_END_OF_LIST()
2803 static bool max_queues_gt_1(void *opaque, int version_id)
2805 return VIRTIO_NET(opaque)->max_queues > 1;
2808 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
2810 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
2811 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
2814 static bool mac_table_fits(void *opaque, int version_id)
2816 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
2819 static bool mac_table_doesnt_fit(void *opaque, int version_id)
2821 return !mac_table_fits(opaque, version_id);
2824 /* This temporary type is shared by all the WITH_TMP methods
2825 * although only some fields are used by each.
2827 struct VirtIONetMigTmp {
2829 VirtIONetQueue *vqs_1;
2830 uint16_t curr_queues_1;
2832 uint32_t has_vnet_hdr;
2835 /* The 2nd and subsequent tx_waiting flags are loaded later than
2836 * the 1st entry in the queues and only if there's more than one
2837 * entry. We use the tmp mechanism to calculate a temporary
2838 * pointer and count and also validate the count.
2841 static int virtio_net_tx_waiting_pre_save(void *opaque)
2843 struct VirtIONetMigTmp *tmp = opaque;
2845 tmp->vqs_1 = tmp->parent->vqs + 1;
2846 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
2847 if (tmp->parent->curr_queues == 0) {
2848 tmp->curr_queues_1 = 0;
2854 static int virtio_net_tx_waiting_pre_load(void *opaque)
2856 struct VirtIONetMigTmp *tmp = opaque;
2858 /* Reuse the pointer setup from save */
2859 virtio_net_tx_waiting_pre_save(opaque);
2861 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
2862 error_report("virtio-net: curr_queues %x > max_queues %x",
2863 tmp->parent->curr_queues, tmp->parent->max_queues);
2868 return 0; /* all good */
2871 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
2872 .name = "virtio-net-tx_waiting",
2873 .pre_load = virtio_net_tx_waiting_pre_load,
2874 .pre_save = virtio_net_tx_waiting_pre_save,
2875 .fields = (VMStateField[]) {
2876 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
2878 vmstate_virtio_net_queue_tx_waiting,
2879 struct VirtIONetQueue),
2880 VMSTATE_END_OF_LIST()
2884 /* the 'has_ufo' flag is just tested; if the incoming stream has the
2885 * flag set we need to check that we have it
2887 static int virtio_net_ufo_post_load(void *opaque, int version_id)
2889 struct VirtIONetMigTmp *tmp = opaque;
2891 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
2892 error_report("virtio-net: saved image requires TUN_F_UFO support");
2899 static int virtio_net_ufo_pre_save(void *opaque)
2901 struct VirtIONetMigTmp *tmp = opaque;
2903 tmp->has_ufo = tmp->parent->has_ufo;
2908 static const VMStateDescription vmstate_virtio_net_has_ufo = {
2909 .name = "virtio-net-ufo",
2910 .post_load = virtio_net_ufo_post_load,
2911 .pre_save = virtio_net_ufo_pre_save,
2912 .fields = (VMStateField[]) {
2913 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
2914 VMSTATE_END_OF_LIST()
2918 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
2919 * flag set we need to check that we have it
2921 static int virtio_net_vnet_post_load(void *opaque, int version_id)
2923 struct VirtIONetMigTmp *tmp = opaque;
2925 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
2926 error_report("virtio-net: saved image requires vnet_hdr=on");
2933 static int virtio_net_vnet_pre_save(void *opaque)
2935 struct VirtIONetMigTmp *tmp = opaque;
2937 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
2942 static const VMStateDescription vmstate_virtio_net_has_vnet = {
2943 .name = "virtio-net-vnet",
2944 .post_load = virtio_net_vnet_post_load,
2945 .pre_save = virtio_net_vnet_pre_save,
2946 .fields = (VMStateField[]) {
2947 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
2948 VMSTATE_END_OF_LIST()
2952 static bool virtio_net_rss_needed(void *opaque)
2954 return VIRTIO_NET(opaque)->rss_data.enabled;
2957 static const VMStateDescription vmstate_virtio_net_rss = {
2958 .name = "virtio-net-device/rss",
2960 .minimum_version_id = 1,
2961 .needed = virtio_net_rss_needed,
2962 .fields = (VMStateField[]) {
2963 VMSTATE_BOOL(rss_data.enabled, VirtIONet),
2964 VMSTATE_BOOL(rss_data.redirect, VirtIONet),
2965 VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
2966 VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
2967 VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
2968 VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
2969 VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
2970 VIRTIO_NET_RSS_MAX_KEY_SIZE),
2971 VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
2972 rss_data.indirections_len, 0,
2973 vmstate_info_uint16, uint16_t),
2974 VMSTATE_END_OF_LIST()
2978 static const VMStateDescription vmstate_virtio_net_device = {
2979 .name = "virtio-net-device",
2980 .version_id = VIRTIO_NET_VM_VERSION,
2981 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2982 .post_load = virtio_net_post_load_device,
2983 .fields = (VMStateField[]) {
2984 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
2985 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
2986 vmstate_virtio_net_queue_tx_waiting,
2988 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
2989 VMSTATE_UINT16(status, VirtIONet),
2990 VMSTATE_UINT8(promisc, VirtIONet),
2991 VMSTATE_UINT8(allmulti, VirtIONet),
2992 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
2994 /* Guarded pair: If it fits we load it, else we throw it away
2995 * - can happen if the source has a larger MAC table; post-load
2996 * sets flags in this case.
2998 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
2999 0, mac_table_fits, mac_table.in_use,
3001 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
3002 mac_table.in_use, ETH_ALEN),
3004 /* Note: This is an array of uint32's that's always been saved as a
3005 * buffer; hold onto your endiannesses; it's actually used as a bitmap
3006 * but based on the uint.
3008 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
3009 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3010 vmstate_virtio_net_has_vnet),
3011 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
3012 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
3013 VMSTATE_UINT8(alluni, VirtIONet),
3014 VMSTATE_UINT8(nomulti, VirtIONet),
3015 VMSTATE_UINT8(nouni, VirtIONet),
3016 VMSTATE_UINT8(nobcast, VirtIONet),
3017 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3018 vmstate_virtio_net_has_ufo),
3019 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
3020 vmstate_info_uint16_equal, uint16_t),
3021 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
3022 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3023 vmstate_virtio_net_tx_waiting),
3024 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
3025 has_ctrl_guest_offloads),
3026 VMSTATE_END_OF_LIST()
3028 .subsections = (const VMStateDescription * []) {
3029 &vmstate_virtio_net_rss,
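/*
 * Hooks wiring the device into the net layer; .receive points at
 * virtio_net_receive() above, which only detours through the RSC code
 * when rsc4/rsc6 coalescing is enabled.
 */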
3034 static NetClientInfo net_virtio_info = {
3035 .type = NET_CLIENT_DRIVER_NIC,
3036 .size = sizeof(NICState),
3037 .can_receive = virtio_net_can_receive,
3038 .receive = virtio_net_receive,
3039 .link_status_changed = virtio_net_set_link_status,
3040 .query_rx_filter = virtio_net_query_rxfilter,
3041 .announce = virtio_net_announce,
3044 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
3046 VirtIONet *n = VIRTIO_NET(vdev);
3047 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
3048 assert(n->vhost_started);
3049 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
3052 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
3055 VirtIONet *n = VIRTIO_NET(vdev);
3056 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
3057 assert(n->vhost_started);
3058 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
3062 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
3064 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
3066 n->config_size = virtio_feature_get_config_size(feature_sizes,
3070 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3074 * The name can be NULL; in that case the netclient name will be of the form type.x.
3076 assert(type != NULL);
3078 g_free(n->netclient_name);
3079 g_free(n->netclient_type);
3080 n->netclient_name = g_strdup(name);
3081 n->netclient_type = g_strdup(type);
3084 static bool failover_unplug_primary(VirtIONet *n)
3086 HotplugHandler *hotplug_ctrl;
3090 hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev);
3092 pci_dev = PCI_DEVICE(n->primary_dev);
3093 pci_dev->partially_hotplugged = true;
3094 hotplug_handler_unplug_request(hotplug_ctrl, n->primary_dev, &err);
3096 error_report_err(err);
3105 static bool failover_replug_primary(VirtIONet *n, Error **errp)
3108 HotplugHandler *hotplug_ctrl;
3109 PCIDevice *pdev = PCI_DEVICE(n->primary_dev);
3110 BusState *primary_bus;
3112 if (!pdev->partially_hotplugged) {
3115 primary_bus = n->primary_dev->parent_bus;
3117 error_setg(errp, "virtio_net: couldn't find primary bus");
3120 qdev_set_parent_bus(n->primary_dev, primary_bus, &error_abort);
3121 qatomic_set(&n->failover_primary_hidden, false);
3122 hotplug_ctrl = qdev_get_hotplug_handler(n->primary_dev);
3124 hotplug_handler_pre_plug(hotplug_ctrl, n->primary_dev, &err);
3128 hotplug_handler_plug(hotplug_ctrl, n->primary_dev, &err);
3132 error_propagate(errp, err);
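/*
 * Failover migration hook: when migration enters setup and the primary
 * device is still visible, it is hotplug-removed (and its vmstate
 * unregistered) so only the virtio-net standby is migrated; if migration
 * fails, failover_replug_primary() puts the primary back.
 */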
3136 static void virtio_net_handle_migration_primary(VirtIONet *n,
3139 bool should_be_hidden;
3142 should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3144 if (!n->primary_dev) {
3145 n->primary_dev = virtio_connect_failover_devices(n, &err);
3146 if (!n->primary_dev) {
3151 if (migration_in_setup(s) && !should_be_hidden) {
3152 if (failover_unplug_primary(n)) {
3153 vmstate_unregister(VMSTATE_IF(n->primary_dev),
3154 qdev_get_vmsd(n->primary_dev),
3156 qapi_event_send_unplug_primary(n->primary_device_id);
3157 qatomic_set(&n->failover_primary_hidden, true);
3159 warn_report("couldn't unplug primary device");
3161 } else if (migration_has_failed(s)) {
3162 /* We already unplugged the device let's plug it back */
3163 if (!failover_replug_primary(n, &err)) {
3165 error_report_err(err);
3171 static void virtio_net_migration_state_notifier(Notifier *notifier, void *data)
3173 MigrationState *s = data;
3174 VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3175 virtio_net_handle_migration_primary(n, s);
3178 static int virtio_net_primary_should_be_hidden(DeviceListener *listener,
3179 QemuOpts *device_opts)
3181 VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
3182 bool match_found = false;
3184 const char *standby_id;
3189 n->primary_device_dict = qemu_opts_to_qdict(device_opts,
3190 n->primary_device_dict);
3191 standby_id = qemu_opt_get(device_opts, "failover_pair_id");
3192 if (g_strcmp0(standby_id, n->netclient_name) == 0) {
3195 match_found = false;
3197 n->primary_device_dict = NULL;
3201 /* failover_primary_hidden is set during feature negotiation */
3202 hide = qatomic_read(&n->failover_primary_hidden);
3204 if (n->primary_device_dict) {
3205 g_free(n->primary_device_id);
3206 n->primary_device_id = g_strdup(qdict_get_try_str(
3207 n->primary_device_dict, "id"));
3208 if (!n->primary_device_id) {
3209 warn_report("primary_device_id not set");
3214 if (match_found && hide) {
3216 } else if (match_found && !hide) {
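/*
 * Realize, in order: validate the duplex/speed options, set up the
 * failover listener and migration-state notifier, check that
 * rx/tx_queue_size are powers of two within
 * [VIRTIO_NET_*_QUEUE_MIN_SIZE, VIRTQUEUE_MAX_SIZE], create one RX/TX
 * pair per queue plus the control queue, and finally instantiate the
 * NIC backend and RSC bookkeeping.
 */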
3223 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
3225 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3226 VirtIONet *n = VIRTIO_NET(dev);
3230 if (n->net_conf.mtu) {
3231 n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
3234 if (n->net_conf.duplex_str) {
3235 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
3236 n->net_conf.duplex = DUPLEX_HALF;
3237 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
3238 n->net_conf.duplex = DUPLEX_FULL;
3240 error_setg(errp, "'duplex' must be 'half' or 'full'");
3243 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3245 n->net_conf.duplex = DUPLEX_UNKNOWN;
3248 if (n->net_conf.speed < SPEED_UNKNOWN) {
3249 error_setg(errp, "'speed' must be between 0 and INT_MAX");
3252 if (n->net_conf.speed >= 0) {
3253 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3257 n->primary_listener.should_be_hidden =
3258 virtio_net_primary_should_be_hidden;
3259 qatomic_set(&n->failover_primary_hidden, true);
3260 device_listener_register(&n->primary_listener);
3261 n->migration_state.notify = virtio_net_migration_state_notifier;
3262 add_migration_state_change_notifier(&n->migration_state);
3263 n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
3266 virtio_net_set_config_size(n, n->host_features);
3267 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
3270 * We set a lower limit on RX queue size to what it always was.
3271 * Guests that want a smaller ring can always resize it without
3272 * help from us (using virtio 1 and up).
3274 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
3275 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
3276 !is_power_of_2(n->net_conf.rx_queue_size)) {
3277 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
3278 "must be a power of 2 between %d and %d.",
3279 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
3280 VIRTQUEUE_MAX_SIZE);
3281 virtio_cleanup(vdev);
3285 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
3286 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
3287 !is_power_of_2(n->net_conf.tx_queue_size)) {
3288 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
3289 "must be a power of 2 between %d and %d",
3290 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
3291 VIRTQUEUE_MAX_SIZE);
3292 virtio_cleanup(vdev);
3296 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
3297 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
3298 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
3299 "must be a positive integer less than %d.",
3300 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
3301 virtio_cleanup(vdev);
3304 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
3306 n->tx_timeout = n->net_conf.txtimer;
3308 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
3309 && strcmp(n->net_conf.tx, "bh")) {
3310 warn_report("virtio-net: "
3311 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
3313 error_printf("Defaulting to \"bh\"");
3316 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
3317 n->net_conf.tx_queue_size);
3319 for (i = 0; i < n->max_queues; i++) {
3320 virtio_net_add_queue(n, i);
3323 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
3324 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
3325 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
3326 n->status = VIRTIO_NET_S_LINK_UP;
3327 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3329 virtio_net_announce_timer, n);
3330 n->announce_timer.round = 0;
3332 if (n->netclient_type) {
3334 * Happens when virtio_net_set_netclient_name has been called.
3336 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3337 n->netclient_type, n->netclient_name, n);
3339 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3340 object_get_typename(OBJECT(dev)), dev->id, n);
3343 peer_test_vnet_hdr(n);
3344 if (peer_has_vnet_hdr(n)) {
3345 for (i = 0; i < n->max_queues; i++) {
3346 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
3348 n->host_hdr_len = sizeof(struct virtio_net_hdr);
3350 n->host_hdr_len = 0;
3353 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
3355 n->vqs[0].tx_waiting = 0;
3356 n->tx_burst = n->net_conf.txburst;
3357 virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
3358 n->promisc = 1; /* for compatibility */
3360 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
3362 n->vlans = g_malloc0(MAX_VLAN >> 3);
3364 nc = qemu_get_queue(n->nic);
3365 nc->rxfilter_notify_enabled = 1;
3367 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
3368 struct virtio_net_config netcfg = {};
3369 memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
3370 vhost_net_set_config(get_vhost_net(nc->peer),
3371 (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER);
3373 QTAILQ_INIT(&n->rsc_chains);
3376 net_rx_pkt_init(&n->rx_pkt, false);
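/*
 * Unrealize tears things down in roughly the reverse order of realize:
 * virtio_net_set_status(vdev, 0) stops any vhost backend, the netclient
 * name strings and MAC table are freed, failover listener state is
 * dropped, every RX/TX pair plus the control queue is deleted, and the
 * NIC, RSC chains and RSS indirection table are released.
 */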
3379 static void virtio_net_device_unrealize(DeviceState *dev)
3381 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3382 VirtIONet *n = VIRTIO_NET(dev);
3385 /* This will stop vhost backend if appropriate. */
3386 virtio_net_set_status(vdev, 0);
3388 g_free(n->netclient_name);
3389 n->netclient_name = NULL;
3390 g_free(n->netclient_type);
3391 n->netclient_type = NULL;
3393 g_free(n->mac_table.macs);
3397 device_listener_unregister(&n->primary_listener);
3398 g_free(n->primary_device_id);
3399 qobject_unref(n->primary_device_dict);
3400 n->primary_device_dict = NULL;
3403 max_queues = n->multiqueue ? n->max_queues : 1;
3404 for (i = 0; i < max_queues; i++) {
3405 virtio_net_del_queue(n, i);
3407 /* delete also control vq */
3408 virtio_del_queue(vdev, max_queues * 2);
3409 qemu_announce_timer_del(&n->announce_timer, false);
3411 qemu_del_nic(n->nic);
3412 virtio_net_rsc_cleanup(n);
3413 g_free(n->rss_data.indirections_table);
3414 net_rx_pkt_uninit(n->rx_pkt);
3415 virtio_cleanup(vdev);
3418 static void virtio_net_instance_init(Object *obj)
3420 VirtIONet *n = VIRTIO_NET(obj);
3423 * The default config_size is sizeof(struct virtio_net_config).
3424 * Can be overridden with virtio_net_set_config_size.
3426 n->config_size = sizeof(struct virtio_net_config);
3427 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
3428 "bootindex", "/ethernet-phy@0",
3432 static int virtio_net_pre_save(void *opaque)
3434 VirtIONet *n = opaque;
3436 /* At this point, the backend must be stopped, otherwise
3437 * it might keep writing to memory. */
3438 assert(!n->vhost_started);
3443 static bool primary_unplug_pending(void *opaque)
3445 DeviceState *dev = opaque;
3446 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3447 VirtIONet *n = VIRTIO_NET(vdev);
3449 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3452 return n->primary_dev ? n->primary_dev->pending_deleted_event : false;
3455 static bool dev_unplug_pending(void *opaque)
3457 DeviceState *dev = opaque;
3458 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3460 return vdc->primary_unplug_pending(dev);
3463 static const VMStateDescription vmstate_virtio_net = {
3464 .name = "virtio-net",
3465 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3466 .version_id = VIRTIO_NET_VM_VERSION,
3467 .fields = (VMStateField[]) {
3468 VMSTATE_VIRTIO_DEVICE,
3469 VMSTATE_END_OF_LIST()
3471 .pre_save = virtio_net_pre_save,
3472 .dev_unplug_pending = dev_unplug_pending,
3475 static Property virtio_net_properties[] = {
3476 DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
3477 VIRTIO_NET_F_CSUM, true),
3478 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
3479 VIRTIO_NET_F_GUEST_CSUM, true),
3480 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
3481 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
3482 VIRTIO_NET_F_GUEST_TSO4, true),
3483 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
3484 VIRTIO_NET_F_GUEST_TSO6, true),
3485 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
3486 VIRTIO_NET_F_GUEST_ECN, true),
3487 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
3488 VIRTIO_NET_F_GUEST_UFO, true),
3489 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
3490 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
3491 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
3492 VIRTIO_NET_F_HOST_TSO4, true),
3493 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
3494 VIRTIO_NET_F_HOST_TSO6, true),
3495 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
3496 VIRTIO_NET_F_HOST_ECN, true),
3497 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
3498 VIRTIO_NET_F_HOST_UFO, true),
3499 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
3500 VIRTIO_NET_F_MRG_RXBUF, true),
3501 DEFINE_PROP_BIT64("status", VirtIONet, host_features,
3502 VIRTIO_NET_F_STATUS, true),
3503 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
3504 VIRTIO_NET_F_CTRL_VQ, true),
3505 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
3506 VIRTIO_NET_F_CTRL_RX, true),
3507 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
3508 VIRTIO_NET_F_CTRL_VLAN, true),
3509 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
3510 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
3511 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
3512 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
3513 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
3514 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
3515 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
3516 DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
3517 VIRTIO_NET_F_RSS, false),
3518 DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
3519 VIRTIO_NET_F_HASH_REPORT, false),
3520 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
3521 VIRTIO_NET_F_RSC_EXT, false),
3522 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
3523 VIRTIO_NET_RSC_DEFAULT_INTERVAL),
3524 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
3525 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
3527 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
3528 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
3529 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
3530 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
3531 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
3532 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
3533 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
3534 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
3536 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
3537 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
3538 DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
3539 DEFINE_PROP_END_OF_LIST(),
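/*
 * These properties are configurable per device instance; a hypothetical
 * command line using the PCI proxy might look like
 *   -device virtio-net-pci,netdev=net0,mq=on,rx_queue_size=1024,tx=bh
 * (names other than the properties defined above are illustrative).
 * Clearing a feature-bit property here simply stops that feature from
 * being offered to the guest.
 */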
3542 static void virtio_net_class_init(ObjectClass *klass, void *data)
3544 DeviceClass *dc = DEVICE_CLASS(klass);
3545 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3547 device_class_set_props(dc, virtio_net_properties);
3548 dc->vmsd = &vmstate_virtio_net;
3549 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
3550 vdc->realize = virtio_net_device_realize;
3551 vdc->unrealize = virtio_net_device_unrealize;
3552 vdc->get_config = virtio_net_get_config;
3553 vdc->set_config = virtio_net_set_config;
3554 vdc->get_features = virtio_net_get_features;
3555 vdc->set_features = virtio_net_set_features;
3556 vdc->bad_features = virtio_net_bad_features;
3557 vdc->reset = virtio_net_reset;
3558 vdc->set_status = virtio_net_set_status;
3559 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
3560 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
3561 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
3562 vdc->post_load = virtio_net_post_load_virtio;
3563 vdc->vmsd = &vmstate_virtio_net_device;
3564 vdc->primary_unplug_pending = primary_unplug_pending;
3567 static const TypeInfo virtio_net_info = {
3568 .name = TYPE_VIRTIO_NET,
3569 .parent = TYPE_VIRTIO_DEVICE,
3570 .instance_size = sizeof(VirtIONet),
3571 .instance_init = virtio_net_instance_init,
3572 .class_init = virtio_net_class_init,
3575 static void virtio_register_types(void)
3577 type_register_static(&virtio_net_info);
3580 type_init(virtio_register_types)