summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVincent Guittot <vincent.guittot@linaro.org>2022-02-01 17:31:11 +0100
committerVincent Guittot <vincent.guittot@linaro.org>2022-08-13 11:51:13 +0200
commit1baa715ebdc305e83bc98f3f6b416ac4e62e6d8a (patch)
treefaa29c3518ea3a3e939841bb05407a8162b356f9
parent717d828c9e7d8cd577d84bfe590e51993d1ce6e2 (diff)
virtio-mmio: support device mode
With device mode, the virtio-mmio transport behaves like the device side of the virtio-mmio transport layer instead of the driver side. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
-rw-r--r--hw/virtio/vhost-user-scmi.c9
-rw-r--r--hw/virtio/vhost-user.c50
-rw-r--r--hw/virtio/vhost.c10
-rw-r--r--hw/virtio/virtio-mmio.c54
-rw-r--r--hw/virtio/virtio.c21
-rw-r--r--include/hw/virtio/virtio.h3
-rw-r--r--subprojects/libvhost-user/libvhost-user.c129
-rw-r--r--subprojects/libvhost-user/libvhost-user.h11
-rw-r--r--tools/vhost-user-scmi/main.c75
9 files changed, 337 insertions, 25 deletions
diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c
index 10ead3c3e7..2bede14b88 100644
--- a/hw/virtio/vhost-user-scmi.c
+++ b/hw/virtio/vhost-user-scmi.c
@@ -90,6 +90,15 @@ static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+    /*
+     * When emulating device mode, start the backend as soon as the features
+     * have been negotiated, so that it is notified when the guest later sets
+     * up the vrings and flags such as VIRTIO_CONFIG_S_DRIVER_OK.
+     */
+    if (vdev->device_mode) {
+        should_start = status & VIRTIO_CONFIG_S_FEATURES_OK;
+    }
+
if (!vdev->vm_running) {
should_start = false;
}
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 6abbc9da32..216fa94773 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -134,6 +134,11 @@ typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_IOTLB_MSG = 1,
VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
+ VHOST_USER_SLAVE_VRING_CALL = 4,
+ VHOST_USER_SLAVE_VRING_ERR = 5,
+ VHOST_USER_SLAVE_VRING_NUM = 6,
+ VHOST_USER_SLAVE_VRING_ADDR = 7,
+ VHOST_USER_SLAVE_DRIVER_STATE = 8,
VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
@@ -1557,6 +1562,40 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
return 0;
}
+static int vhost_user_slave_handle_vring_num(struct vhost_dev *dev,
+                                             struct vhost_vring_state *state)
+{
+    VirtIODevice *vdev = dev->vdev;
+    /* state->num and state->index are unsigned: log with %u */
+    info_report("%s: idx %u num %u", __func__, state->index, state->num);
+    virtio_queue_set_num(vdev, state->index & 0xFF, state->num);
+    return 0;
+}
+
+static int vhost_user_slave_handle_vring_addr(struct vhost_dev *dev,
+                                              struct vhost_vring_addr *addr)
+{
+    VirtIODevice *vdev = dev->vdev;
+    /* desc_user_addr is 64-bit: "%lx" is wrong on 32-bit hosts, use PRIx64 */
+    info_report("%s: idx %u desc addr 0x%" PRIx64, __func__,
+                addr->index, (uint64_t)addr->desc_user_addr);
+    virtio_queue_set_rings(vdev, addr->index & 0xFF, addr->desc_user_addr,
+                           addr->avail_user_addr, addr->used_user_addr);
+    return 0;
+}
+
+static int vhost_user_slave_handle_driver_state(struct vhost_dev *dev,
+                                                struct vhost_vring_state *state)
+{
+    VirtIODevice *vdev = dev->vdev;
+    info_report("%s: idx %u state 0x%x", __func__, state->index, state->num);
+    virtio_queue_set_ready(vdev, state->index & 0xFF, state->num);
+    /* Mirror the driver's readiness into the device status byte */
+    if (state->num) {
+        vdev->status |= VIRTIO_CONFIG_S_DRIVER_OK;
+    } else {
+        vdev->status &= ~VIRTIO_CONFIG_S_DRIVER_OK;
+    }
+    return 0;
+}
+
static void close_slave_channel(struct vhost_user *u)
{
g_source_destroy(u->slave_src);
@@ -1614,7 +1653,16 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
fd ? fd[0] : -1);
break;
- default:
+ case VHOST_USER_SLAVE_VRING_NUM:
+ ret = vhost_user_slave_handle_vring_num(dev, &payload.state);
+ break;
+ case VHOST_USER_SLAVE_VRING_ADDR:
+ ret = vhost_user_slave_handle_vring_addr(dev, &payload.addr);
+ break;
+ case VHOST_USER_SLAVE_DRIVER_STATE:
+ ret = vhost_user_slave_handle_driver_state(dev, &payload.state);
+ break;
+ default:
error_report("Received unexpected msg type: %d.", hdr.request);
ret = -EINVAL;
}
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index b643f42ea4..b922412a15 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1093,6 +1093,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
};
struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
+    if (vdev->device_mode) {
+        /* Device side: the guest owns the rings, only push the base index */
+        state.num = virtio_queue_get_last_avail_idx(vdev, idx);
+        r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
+        if (r) {
+            VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
+            return r;
+        }
+    } else {
a = virtio_queue_get_desc_addr(vdev, idx);
if (a == 0) {
/* Queue might not be ready for start */
@@ -1148,6 +1157,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
if (r < 0) {
goto fail_alloc;
}
+ }
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 883b67d394..962459e64c 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -193,6 +193,10 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
__func__, offset);
return 0;
}
+
+ if (vdev->device_mode) {
+ return virtio_queue_get_ready(vdev, vdev->queue_sel);
+ }
return proxy->vqs[vdev->queue_sel].enabled;
case VIRTIO_MMIO_INTERRUPT_STATUS:
return qatomic_read(&vdev->isr);
@@ -215,21 +219,42 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
* the shared memory doesn't exist
*/
return -1;
+ case VIRTIO_MMIO_QUEUE_NUM:
+ if (vdev->device_mode)
+ return virtio_queue_get_num(vdev, vdev->queue_sel);
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_DESC_LOW:
+ if (vdev->device_mode)
+ return virtio_queue_get_addr(vdev, vdev->queue_sel) & 0xFFFFFFFF;
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_DESC_HIGH:
+ if (vdev->device_mode)
+ return virtio_queue_get_addr(vdev, vdev->queue_sel) >> 32;
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
+ if (vdev->device_mode)
+ return virtio_queue_get_avail_addr(vdev, vdev->queue_sel) & 0xFFFFFFFF;
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
+ if (vdev->device_mode)
+ return virtio_queue_get_avail_addr(vdev, vdev->queue_sel) >> 32;
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_USED_LOW:
+ if (vdev->device_mode)
+ return virtio_queue_get_used_addr(vdev, vdev->queue_sel) & 0xFFFFFFFF;
+ /* fallthrough */
+ case VIRTIO_MMIO_QUEUE_USED_HIGH:
+ if (vdev->device_mode)
+ return virtio_queue_get_used_addr(vdev, vdev->queue_sel) >> 32;
+ /* fallthrough */
case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
case VIRTIO_MMIO_DRIVER_FEATURES:
case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
case VIRTIO_MMIO_GUEST_PAGE_SIZE:
case VIRTIO_MMIO_QUEUE_SEL:
- case VIRTIO_MMIO_QUEUE_NUM:
case VIRTIO_MMIO_QUEUE_ALIGN:
case VIRTIO_MMIO_QUEUE_NOTIFY:
case VIRTIO_MMIO_INTERRUPT_ACK:
- case VIRTIO_MMIO_QUEUE_DESC_LOW:
- case VIRTIO_MMIO_QUEUE_DESC_HIGH:
- case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
- case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
- case VIRTIO_MMIO_QUEUE_USED_LOW:
- case VIRTIO_MMIO_QUEUE_USED_HIGH:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
__func__, offset);
@@ -415,7 +440,10 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
virtio_update_irq(vdev);
break;
case VIRTIO_MMIO_STATUS:
-        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
+        /* In device mode, ioeventfd follows FEATURES_OK, not DRIVER_OK */
+        if (!(value & (vdev->device_mode ?
+                       VIRTIO_CONFIG_S_FEATURES_OK :
+                       VIRTIO_CONFIG_S_DRIVER_OK))) {
virtio_mmio_stop_ioeventfd(proxy);
}
@@ -427,7 +455,10 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
virtio_set_status(vdev, value & 0xff);
-        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
+        /* In device mode, ioeventfd follows FEATURES_OK, not DRIVER_OK */
+        if (value & (vdev->device_mode ?
+                     VIRTIO_CONFIG_S_FEATURES_OK :
+                     VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_mmio_start_ioeventfd(proxy);
}
@@ -496,11 +527,14 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
}
proxy->vqs[vdev->queue_sel].used[1] = value;
break;
+ case VIRTIO_MMIO_DEVICE_FEATURES:
+ if (vdev->device_mode)
+ proxy->guest_features[proxy->host_features_sel] = value;
+ /* fallthrough */
case VIRTIO_MMIO_MAGIC_VALUE:
case VIRTIO_MMIO_VERSION:
case VIRTIO_MMIO_DEVICE_ID:
case VIRTIO_MMIO_VENDOR_ID:
- case VIRTIO_MMIO_DEVICE_FEATURES:
case VIRTIO_MMIO_QUEUE_NUM_MAX:
case VIRTIO_MMIO_INTERRUPT_STATUS:
case VIRTIO_MMIO_CONFIG_GENERATION:
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 9d637e043e..892d881216 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -123,6 +123,8 @@ struct VirtQueue
unsigned int inuse;
+ bool ready;
+
uint16_t vector;
VirtIOHandleOutput handle_output;
VirtIODevice *vdev;
@@ -1938,7 +1940,12 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val)
}
}
- if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
+ if (vdev->device_mode) {
+ if ((vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) !=
+ (val & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ virtio_set_started(vdev, val & VIRTIO_CONFIG_S_FEATURES_OK);
+ }
+ } else if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
}
@@ -2015,6 +2022,7 @@ void virtio_reset(void *opaque)
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
+ vdev->vq[i].ready = false;
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
}
@@ -3300,6 +3308,16 @@ bool virtio_queue_enabled(VirtIODevice *vdev, int n)
return virtio_queue_enabled_legacy(vdev, n);
}
+bool virtio_queue_get_ready(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].ready;
+}
+
+void virtio_queue_set_ready(VirtIODevice *vdev, int n, bool ready)
+{
+ vdev->vq[n].ready = ready;
+}
+
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.avail;
@@ -3685,6 +3703,7 @@ static Property virtio_properties[] = {
DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
disable_legacy_check, false),
+ DEFINE_PROP_BOOL("device-mode", VirtIODevice, device_mode, false),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index d0a6489204..26883b4e76 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -102,6 +102,7 @@ struct VirtIODevice
bool started;
bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */
bool disable_legacy_check;
+ bool device_mode;
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
@@ -289,6 +290,8 @@ typedef struct VirtIORNGConf VirtIORNGConf;
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
bool virtio_queue_enabled(VirtIODevice *vdev, int n);
+void virtio_queue_set_ready(VirtIODevice *vdev, int n, bool ready);
+bool virtio_queue_get_ready(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 332c71fe92..4cbd9382a8 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -199,6 +199,25 @@ vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
return NULL;
}
+/* Translate a qemu virtual address into a guest physical address. */
+void *
+vu_qva_to_gpa(VuDev *dev, uint64_t qemu_addr)
+{
+    unsigned int i;
+
+    /* Find the memory region containing qemu_addr. */
+    for (i = 0; i < dev->nregions; i++) {
+        VuDevRegion *r = &dev->regions[i];
+
+        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
+            /* Parenthesize so the cast applies to the whole sum, not qemu_addr */
+            return (void *)(uintptr_t)(qemu_addr - r->qva + r->gpa);
+        }
+    }
+
+    return NULL;
+}
+
/* Translate qemu virtual address to our virtual address. */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
@@ -1388,6 +1407,111 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
return vu_process_message_reply(dev, &vmsg);
}
+bool vu_set_queue_host_num(VuDev *dev, unsigned int qidx, int fd,
+                           int num)
+{
+    int fd_num = 0;
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_VRING_NUM,
+        .flags = VHOST_USER_VERSION,
+        .size = sizeof(vmsg.payload.state),
+        .payload.state = {
+            .index = qidx & VHOST_USER_VRING_IDX_MASK,
+            .num = num,
+        },
+    };
+
+    /* payload.state shares storage with payload.area: setting the NOFD
+     * mask there would clobber index/num, so only attach an fd when one
+     * is actually supplied. */
+    if (fd != -1) {
+        vmsg.fds[fd_num++] = fd;
+    }
+
+    vmsg.fd_num = fd_num;
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+        return false;
+    }
+
+    pthread_mutex_lock(&dev->slave_mutex);
+    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->slave_mutex);
+        return false;
+    }
+
+    /* Also unlocks the slave_mutex */
+    return vu_process_message_reply(dev, &vmsg);
+}
+
+bool vu_set_queue_host_addr(VuDev *dev, unsigned int qidx, int fd,
+                            struct vhost_vring_addr *vring)
+{
+    int fd_num = 0;
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_VRING_ADDR,
+        .flags = VHOST_USER_VERSION,
+        .size = sizeof(vmsg.payload.addr),
+        .payload.addr = *vring
+    };
+
+    /* payload.addr shares storage with payload.area: setting the NOFD
+     * mask there would corrupt the vring addresses, so only attach an
+     * fd when one is actually supplied. */
+    if (fd != -1) {
+        vmsg.fds[fd_num++] = fd;
+    }
+
+    vmsg.fd_num = fd_num;
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+        return false;
+    }
+
+    pthread_mutex_lock(&dev->slave_mutex);
+    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->slave_mutex);
+        return false;
+    }
+
+    /* Also unlocks the slave_mutex */
+    return vu_process_message_reply(dev, &vmsg);
+}
+
+bool vu_set_queue_host_state(VuDev *dev, unsigned int qidx, int fd,
+                             int num)
+{
+    int fd_num = 0;
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_DRIVER_STATE,
+        .flags = VHOST_USER_VERSION,
+        .size = sizeof(vmsg.payload.state),
+        .payload.state = {
+            .index = qidx & VHOST_USER_VRING_IDX_MASK,
+            .num = num,
+        },
+    };
+
+    /* payload.state shares storage with payload.area: setting the NOFD
+     * mask there would clobber index/num, so only attach an fd when one
+     * is actually supplied. */
+    if (fd != -1) {
+        vmsg.fds[fd_num++] = fd;
+    }
+
+    vmsg.fd_num = fd_num;
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+        return false;
+    }
+
+    pthread_mutex_lock(&dev->slave_mutex);
+    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->slave_mutex);
+        return false;
+    }
+
+    /* Also unlocks the slave_mutex */
+    return vu_process_message_reply(dev, &vmsg);
+}
+
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
@@ -2400,12 +2524,11 @@ vring_notify(VuDev *dev, VuVirtq *vq)
static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
-    if (unlikely(dev->broken) ||
-        unlikely(!vq->vring.avail)) {
+    if (unlikely(dev->broken)) {
return;
}

-    if (!vring_notify(dev, vq)) {
+    if (vq->vring.avail && !vring_notify(dev, vq)) {
DPRINT("skipped notify...\n");
return;
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index cde9f07bb3..66d4de9996 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -119,6 +119,9 @@ typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_SLAVE_VRING_CALL = 4,
VHOST_USER_SLAVE_VRING_ERR = 5,
+ VHOST_USER_SLAVE_VRING_NUM = 6,
+ VHOST_USER_SLAVE_VRING_ADDR = 7,
+ VHOST_USER_SLAVE_DRIVER_STATE = 8,
VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
@@ -492,6 +495,7 @@ bool vu_dispatch(VuDev *dev);
* Translate a guest address to a pointer. Returns NULL on failure.
*/
void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
+void *vu_qva_to_gpa(VuDev *dev, uint64_t mmap_addr);
/**
* vu_get_queue:
@@ -530,6 +534,13 @@ void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
int size, int offset);
+bool vu_set_queue_host_num(VuDev *dev, unsigned int qidx, int fd,
+ int num);
+bool vu_set_queue_host_addr(VuDev *dev, unsigned int qidx, int fd,
+ struct vhost_vring_addr *vring);
+bool vu_set_queue_host_state(VuDev *dev, unsigned int qidx, int fd,
+ int num);
+
/**
* vu_queue_set_notification:
* @dev: a VuDev context
diff --git a/tools/vhost-user-scmi/main.c b/tools/vhost-user-scmi/main.c
index 24e52b0090..92945e70ea 100644
--- a/tools/vhost-user-scmi/main.c
+++ b/tools/vhost-user-scmi/main.c
@@ -63,6 +63,8 @@ typedef struct {
GSocket *socket ;
GThread *thread;
VuVirtqElement *elem[VHOST_USER_SCMI_MAX_QUEUES];
+ struct vhost_vring_addr vring[VHOST_USER_SCMI_MAX_QUEUES];
+ int vring_num[VHOST_USER_SCMI_MAX_QUEUES];
bool guest;
bool started;
} VuChnl;
@@ -119,15 +121,6 @@ static void vscmi_dump_msg(struct virtio_scmi_request *msg, size_t len)
g_info("%s: %s", __func__, s->str);
}
-static uint8_t vscmi_xfer(VuDev *dev, struct virtio_scmi_request *msg, size_t len)
-{
- if (debug) {
- vscmi_dump_msg(msg, len);
- }
-
- return VIRTIO_SCMI_MSG_OK;
-}
-
/*
* vscmi_queue_set_started: set vq handler
*
@@ -216,7 +209,7 @@ static int vscmi_forward_buffer(int qidx, VuChnl *src_chnl, VuChnl *dst_chnl)
out_hdr_len = elem->out_sg[0].iov_len;
g_info("%s: path %s sent OUT size %lu @ %p", __func__, src_chnl->path, out_hdr_len, out_hdr);
if (debug)
- vscmi_xfer(&src_chnl->dev.parent, out_hdr, out_hdr_len);
+ vscmi_dump_msg(out_hdr, out_hdr_len);
vscmi_copy_req(src_chnl->elem[qidx], dst_chnl->elem[qidx]);
// g_info("%s: path %s copied IN elem size %lu", __func__, dst_chnl->path, dst_chnl->elem[qidx]->in_sg[0].iov_len);
} else
@@ -315,6 +308,34 @@ vscmi_queue_set_started(VuDev *dev, int qidx, bool started)
chnl->started = started;
+    if (notification && chnl->guest) {
+        /*
+         * In notification-only mode, the backend is a device that needs to
+         * be set up with the guest vring information.
+         *
+         * Forward the vring setup when the guest starts.
+         */
+        if (started) {
+            /* Guest starts, set backend/device addresses */
+            VuScmi *scmi = container_of(chnl, VuScmi, vm_dev);
+            VuChnl *be_chnl = &scmi->be_dev;
+
+            /* update the backend's vring guest-physical addresses */
+            vu_set_queue_host_num(&be_chnl->dev.parent, qidx, -1, chnl->vring_num[qidx]);
+            vu_set_queue_host_addr(&be_chnl->dev.parent, qidx, -1, &chnl->vring[qidx]);
+            vu_set_queue_host_state(&be_chnl->dev.parent, qidx, -1, 1);
+            vu_queue_notify(&be_chnl->dev.parent, vu_get_queue(&be_chnl->dev.parent, qidx));
+        } else {
+            /* Guest stops, clear server/device status */
+            VuScmi *scmi = container_of(chnl, VuScmi, vm_dev);
+            VuChnl *be_chnl = &scmi->be_dev;
+
+            /* clear ready and status bits */
+            vu_set_queue_host_state(&be_chnl->dev.parent, qidx, -1, 0);
+            vu_queue_notify(&be_chnl->dev.parent, vu_get_queue(&be_chnl->dev.parent, qidx));
+        }
+    }
+
vu_set_queue_handler(dev, vq, started ? vscmi_handle_ctrl : NULL);
}
@@ -336,6 +357,40 @@ static int vscmi_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
return 1;
}
+    if (notification && chnl->guest) {
+        /*
+         * In notification-only mode, the backend is a device that needs to
+         * be set up with the guest vring information.
+         *
+         * Save the guest vring information here.
+         */
+        if (msg->request == VHOST_USER_SET_VRING_NUM) {
+            unsigned int index = msg->payload.state.index;
+            unsigned int num = msg->payload.state.num;
+
+            /* Save guest virtqueue size */
+            chnl->vring_num[index] = num;
+
+            g_info("%s: path %s VHOST_USER_SET_VRING_NUM idx %u num %u", __func__, chnl->path, index, num);
+        }
+
+        if (msg->request == VHOST_USER_SET_VRING_ADDR) {
+            struct vhost_vring_addr *vra = &msg->payload.addr;
+            unsigned int index = vra->index;
+
+            /* Save guest virtqueue addresses, translated to guest physical */
+            chnl->vring[index].index = index;
+            chnl->vring[index].desc_user_addr = (uint64_t)(uintptr_t)vu_qva_to_gpa(dev, vra->desc_user_addr);
+            chnl->vring[index].used_user_addr = (uint64_t)(uintptr_t)vu_qva_to_gpa(dev, vra->used_user_addr);
+            chnl->vring[index].avail_user_addr = (uint64_t)(uintptr_t)vu_qva_to_gpa(dev, vra->avail_user_addr);
+
+            g_info("%s: path %s VHOST_USER_SET_VRING_ADDR idx %u", __func__, chnl->path, index);
+            g_info("  guest phys desc:  0x%016" PRIx64, (uint64_t)chnl->vring[index].desc_user_addr);
+            g_info("  guest phys used:  0x%016" PRIx64, (uint64_t)chnl->vring[index].used_user_addr);
+            g_info("  guest phys avail: 0x%016" PRIx64, (uint64_t)chnl->vring[index].avail_user_addr);
+        }
+    }
+
if (debug) {
g_info("%s: path %s : request %d", __func__, chnl->path, msg->request);
}