diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2022-02-01 17:31:11 +0100 |
---|---|---|
committer | Vincent Guittot <vincent.guittot@linaro.org> | 2022-08-13 11:51:13 +0200 |
commit | 1baa715ebdc305e83bc98f3f6b416ac4e62e6d8a (patch) | |
tree | faa29c3518ea3a3e939841bb05407a8162b356f9 /subprojects/libvhost-user/libvhost-user.c | |
parent | 717d828c9e7d8cd577d84bfe590e51993d1ce6e2 (diff) |
virtio-mmio: support device mode
With device mode, the virtio-mmio behaves like the device side of the
virtio-mmio transport layer instead of the driver side.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Diffstat (limited to 'subprojects/libvhost-user/libvhost-user.c')
-rw-r--r-- | subprojects/libvhost-user/libvhost-user.c | 129 |
1 files changed, 126 insertions, 3 deletions
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index 332c71fe92..4cbd9382a8 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -199,6 +199,25 @@ vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) return NULL; } +/* Translate qemu virtual address into guest physical address. */ +void * +vu_qva_to_gpa(VuDev *dev, uint64_t qemu_addr) +{ + unsigned int i; + + /* Find matching memory region. */ + for (i = 0; i < dev->nregions; i++) { + VuDevRegion *r = &dev->regions[i]; + + if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { + return (void *)(uintptr_t) + (qemu_addr - r->qva + r->gpa); + } + } + + return NULL; +} + /* Translate qemu virtual address to our virtual address. */ static void * qva_to_va(VuDev *dev, uint64_t qemu_addr) @@ -1388,6 +1407,111 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, return vu_process_message_reply(dev, &vmsg); } +bool vu_set_queue_host_num(VuDev *dev, unsigned int qidx, int fd, + int num) +{ + int fd_num = 0; + VhostUserMsg vmsg = { + .request = VHOST_USER_SLAVE_VRING_NUM, + .flags = VHOST_USER_VERSION, + .size = sizeof(vmsg.payload.state), + .payload.state = { + .index = qidx & VHOST_USER_VRING_IDX_MASK, + .num = num, + }, + }; + + if (fd == -1) { + vmsg.payload.state.index |= VHOST_USER_VRING_NOFD_MASK; + } else { + vmsg.fds[fd_num++] = fd; + } + + vmsg.fd_num = fd_num; + + if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) { + return false; + } + + pthread_mutex_lock(&dev->slave_mutex); + if (!vu_message_write(dev, dev->slave_fd, &vmsg)) { + pthread_mutex_unlock(&dev->slave_mutex); + return false; + } + + /* Also unlocks the slave_mutex */ + return vu_process_message_reply(dev, &vmsg); +} + +bool vu_set_queue_host_addr(VuDev *dev, unsigned int qidx, int fd, + struct vhost_vring_addr *vring) +{ + int fd_num = 0; + VhostUserMsg vmsg = { + .request = VHOST_USER_SLAVE_VRING_ADDR, + 
.flags = VHOST_USER_VERSION, + .size = sizeof(vmsg.payload.addr), + .payload.addr = *vring + }; + + if (fd == -1) { + vmsg.payload.addr.index |= VHOST_USER_VRING_NOFD_MASK; + } else { + vmsg.fds[fd_num++] = fd; + } + + vmsg.fd_num = fd_num; + + if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) { + return false; + } + + pthread_mutex_lock(&dev->slave_mutex); + if (!vu_message_write(dev, dev->slave_fd, &vmsg)) { + pthread_mutex_unlock(&dev->slave_mutex); + return false; + } + + /* Also unlocks the slave_mutex */ + return vu_process_message_reply(dev, &vmsg); +} + +bool vu_set_queue_host_state(VuDev *dev, unsigned int qidx, int fd, + int num) +{ + int fd_num = 0; + VhostUserMsg vmsg = { + .request = VHOST_USER_SLAVE_DRIVER_STATE, + .flags = VHOST_USER_VERSION, + .size = sizeof(vmsg.payload.state), + .payload.state = { + .index = qidx & VHOST_USER_VRING_IDX_MASK, + .num = num, + }, + }; + + if (fd == -1) { + vmsg.payload.state.index |= VHOST_USER_VRING_NOFD_MASK; + } else { + vmsg.fds[fd_num++] = fd; + } + + vmsg.fd_num = fd_num; + + if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) { + return false; + } + + pthread_mutex_lock(&dev->slave_mutex); + if (!vu_message_write(dev, dev->slave_fd, &vmsg)) { + pthread_mutex_unlock(&dev->slave_mutex); + return false; + } + + /* Also unlocks the slave_mutex */ + return vu_process_message_reply(dev, &vmsg); +} + static bool vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) { @@ -2400,12 +2524,11 @@ vring_notify(VuDev *dev, VuVirtq *vq) static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) { - if (unlikely(dev->broken) || - unlikely(!vq->vring.avail)) { + if (unlikely(dev->broken)) { return; } - if (!vring_notify(dev, vq)) { + if (vq->vring.avail && !vring_notify(dev, vq)) { DPRINT("skipped notify...\n"); return; } |