diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
@@ ... @@
 static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
 {
     VirtIOBlock *s = VIRTIO_BLK(vdev);
-    uint32_t features;

     if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                     VIRTIO_CONFIG_S_DRIVER_OK))) {
         return;
     }

-    features = vdev->guest_features;
-
     /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
      * cache flushes. Thus, the "auto writethrough" behavior is never
      * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
      * Otherwise, a guest that enabled writeback through the WCE
      * configuration field would have s->blk erroneously placed back in
      * writethrough mode here.
      */
-    if (!(features & (1 << VIRTIO_BLK_F_CONFIG_WCE))) {
+    if (!virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
         aio_context_acquire(blk_get_aio_context(s->blk));
         blk_set_enable_write_cache(s->blk,
-                                   !!(features & (1 << VIRTIO_BLK_F_WCE)));
+                                   virtio_has_feature(vdev, VIRTIO_BLK_F_WCE));
         aio_context_release(blk_get_aio_context(s->blk));
     }
 }
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
@@ ... @@
 static bool use_multiport(VirtIOSerial *vser)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vser);
-    return vdev->guest_features & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
+    return virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT);
 }

 static size_t write_to_port(VirtIOSerialPort *port,
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
@@ ... @@ virtio_net_set_config
     memcpy(&netcfg, config, n->config_size);

-    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
+    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
         memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
         memcpy(n->mac, netcfg.mac, ETH_ALEN);
         qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
@@ ... @@ virtio_net_query_rxfilter
     info->multicast_table = str_list;
     info->vlan_table = get_vlan_table(n);

-    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
+    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
         info->vlan = RX_STATE_ALL;
     } else if (!info->vlan_table) {
         info->vlan = RX_STATE_NONE;
@@ ... @@ virtio_net_set_features
     VirtIONet *n = VIRTIO_NET(vdev);
     int i;

-    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));
+    virtio_net_set_multiqueue(n,
+                              __virtio_has_feature(features, VIRTIO_NET_F_MQ));

-    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
+    virtio_net_set_mrg_rx_bufs(n,
+                               __virtio_has_feature(features,
+                                                    VIRTIO_NET_F_MRG_RXBUF));

     if (n->has_vnet_hdr) {
         n->curr_guest_offloads =
@@ ... @@ virtio_net_set_features
         vhost_net_ack_features(get_vhost_net(nc->peer), features);
     }

-    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+    if (__virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
         memset(n->vlans, 0, MAX_VLAN >> 3);
     } else {
         memset(n->vlans, 0xff, MAX_VLAN >> 3);
@@ ... @@ virtio_net_handle_offloads
     uint64_t offloads;
     size_t s;

-    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
+    if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
         return VIRTIO_NET_ERR;
     }
@@ ... @@ virtio_net_save
         }
     }

-    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
+    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
         qemu_put_be64(f, n->curr_guest_offloads);
     }
 }
@@ ... @@ virtio_net_load
         }
     }

-    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
+    if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
         n->curr_guest_offloads = qemu_get_be64(f);
     } else {
         n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
@@ ... @@ virtio_net_load
         qemu_get_subqueue(n->nic, i)->link_down = link_down;
     }

-    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
-        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
+    if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
+        virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
         n->announce_counter = SELF_ANNOUNCE_ROUNDS;
         timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
     }
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
@@ ... @@ virtio_scsi_parse_req
      *
      * TODO: always disable this workaround for virtio 1.0 devices.
      */
-    if ((vdev->guest_features & (1 << VIRTIO_F_ANY_LAYOUT)) == 0) {
+    if (!virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
         req_size = req->elem.out_sg[0].iov_len;
         resp_size = req->elem.in_sg[0].iov_len;
     }
@@ ... @@ virtio_scsi_change
     VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
     VirtIODevice *vdev = VIRTIO_DEVICE(s);

-    if (((vdev->guest_features >> VIRTIO_SCSI_F_CHANGE) & 1) &&
+    if (virtio_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
         dev->type != TYPE_ROM) {
         virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
                                sense.asc | (sense.ascq << 8));
@@ ... @@ virtio_scsi_hotplug
         blk_op_block_all(sd->conf.blk, s->blocker);
     }

-    if ((vdev->guest_features >> VIRTIO_SCSI_F_HOTPLUG) & 1) {
+    if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_RESCAN);
@@ ... @@ virtio_scsi_hotunplug
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
     SCSIDevice *sd = SCSI_DEVICE(dev);

-    if ((vdev->guest_features >> VIRTIO_SCSI_F_HOTPLUG) & 1) {
+    if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
         virtio_scsi_push_event(s, sd,
                                VIRTIO_SCSI_T_TRANSPORT_RESET,
                                VIRTIO_SCSI_EVT_RESET_REMOVED);
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
@@ ... @@
 /* Disable guest->host notifies */
 void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
 {
-    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
+    if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
     }
 }
@@ ... @@
  */
 bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
 {
-    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+    if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_avail_event(&vring->vr) = vring->vr.avail->idx;
     } else {
         vring_clear_used_flags(vdev, vring, VRING_USED_F_NO_NOTIFY);
@@ ... @@ vring_should_notify
      * interrupts. */
     smp_mb();

-    if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
+    if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
         unlikely(!vring_more_avail(vdev, vring))) {
         return true;
     }

-    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
+    if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         return !(vring_get_avail_flags(vdev, vring) &
                  VRING_AVAIL_F_NO_INTERRUPT);
     }
@@ ... @@ vring_pop
     /* On success, increment avail index. */
     vring->last_avail_idx++;
-    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+    if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_avail_event(&vring->vr) = vring->last_avail_idx;
     }
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
@@ ... @@
 static bool balloon_stats_supported(const VirtIOBalloon *s)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
-    return vdev->guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ);
+    return virtio_has_feature(vdev, VIRTIO_BALLOON_F_STATS_VQ);
 }

 static bool balloon_stats_enabled(const VirtIOBalloon *s)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
@@ ... @@
 void virtio_queue_set_notification(VirtQueue *vq, int enable)
 {
     vq->notification = enable;
-    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+    if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vring_avail_idx(vq));
     } else if (enable) {
         vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
@@ ... @@ virtqueue_pop
     max = vq->vring.num;

     i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
-    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
+    if (virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         vring_set_avail_event(vq, vq->last_avail_idx);
     }
@@ ... @@ vring_notify
     /* We need to expose used array entries before checking used event. */
     smp_mb();

     /* Always notify when queue is empty (when feature acknowledge) */
-    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
-         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
+    if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
         return true;
     }

-    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
+    if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
         return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
     }
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
@@ ... @@ virtio_clear_feature
     *features &= ~(1 << fbit);
 }

+static inline bool __virtio_has_feature(uint32_t features, unsigned int fbit)
+{
+    assert(fbit < 32);
+    return !!(features & (1 << fbit));
+}
+
+static inline bool virtio_has_feature(VirtIODevice *vdev, unsigned int fbit)
+{
+    return __virtio_has_feature(vdev->guest_features, fbit);
+}
+
 static inline bool virtio_is_big_endian(VirtIODevice *vdev)
 {
     assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
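
A usage note on the two helpers added to include/hw/virtio/virtio.h: virtio_has_feature() tests a bit in the feature word already stored in vdev->guest_features, while __virtio_has_feature() tests a raw feature word. The raw variant is what the virtio_net_set_features() hunks above need, because a set_features callback is handed the new bits as an argument (those hunks test the `features` parameter, not vdev->guest_features). A minimal sketch of the intended split; my_dev_set_features(), my_dev_foo_active() and MY_DEV_F_FOO are hypothetical placeholders, only VirtIODevice and the two helpers come from this patch:

    /* Hypothetical device code; MY_DEV_F_FOO and these functions do not
     * exist in QEMU and only illustrate the two helpers. */
    static void my_dev_set_features(VirtIODevice *vdev, uint32_t features)
    {
        /* Inside a set_features callback, test the raw word that was
         * passed in, not vdev->guest_features: */
        if (__virtio_has_feature(features, MY_DEV_F_FOO)) {
            /* configure the optional behavior */
        }
    }

    static bool my_dev_foo_active(VirtIODevice *vdev)
    {
        /* Anywhere after negotiation, test the stored word: */
        return virtio_has_feature(vdev, MY_DEV_F_FOO);
    }

Both helpers assert(fbit < 32), so they also catch out-of-range feature bits that the open-coded `1 << fbit` shifts would quietly mishandle.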