diff options
author | Vincent Guittot <vincent.guittot@linaro.org> | 2021-09-07 14:14:44 +0200 |
---|---|---|
committer | Vincent Guittot <vincent.guittot@linaro.org> | 2022-06-24 14:55:38 +0200 |
commit | df127d1ffd6110fd126ad4029ce4cc04b6461d21 (patch) | |
tree | 86771f14373fa876a7b028b5b24011bd5cbe5cd2 | |
parent | 823a3f11fb8f04c3c3cc0f95f968fef1bfc6534f (diff) |
vhost-user: Add scmi device
Add vhost user scmi devices (mmio and pci)
Add a simple vhost-user SCMI bridge to be used with an SCMI backend in a VM
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
-rw-r--r-- | hw/virtio/Kconfig | 5 | ||||
-rw-r--r-- | hw/virtio/meson.build | 2 | ||||
-rw-r--r-- | hw/virtio/vhost-user-scmi-pci.c | 69 | ||||
-rw-r--r-- | hw/virtio/vhost-user-scmi.c | 287 | ||||
-rw-r--r-- | include/hw/virtio/vhost-user-scmi.h | 36 | ||||
-rw-r--r-- | tools/meson.build | 8 | ||||
-rw-r--r-- | tools/vhost-user-scmi/50-qemu-scmi.json.in | 5 | ||||
-rw-r--r-- | tools/vhost-user-scmi/main.c | 529 | ||||
-rw-r--r-- | tools/vhost-user-scmi/meson.build | 10 |
9 files changed, 951 insertions, 0 deletions
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index c144d42f9b..6a1c5f2476 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -68,3 +68,8 @@ config VHOST_USER_RNG bool default y depends on VIRTIO && VHOST_USER + +config VHOST_USER_SCMI + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index 67dc77e00f..2df5ae8974 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -29,6 +29,8 @@ virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) virtio_ss.add(when: ['CONFIG_VHOST_USER_RNG', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-rng-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c')) +virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) diff --git a/hw/virtio/vhost-user-scmi-pci.c b/hw/virtio/vhost-user-scmi-pci.c new file mode 100644 index 0000000000..2fee73f2f8 --- /dev/null +++ b/hw/virtio/vhost-user-scmi-pci.c @@ -0,0 +1,69 @@ +/* + * Vhost-user scmi virtio device PCI glue + * + * Copyright (c) 2021 Vincent Guittot <vincent.guittot@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-scmi.h" +#include "virtio-pci.h" + +struct VHostUserSCMIPCI { + VirtIOPCIProxy parent_obj; + VHostUserSCMI vdev; +}; + +typedef struct VHostUserSCMIPCI VHostUserSCMIPCI; + +#define TYPE_VHOST_USER_SCMI_PCI "vhost-user-scmi-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserSCMIPCI, VHOST_USER_SCMI_PCI, + TYPE_VHOST_USER_SCMI_PCI) + +static void 
vhost_user_scmi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->nvectors = 2; + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_scmi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_scmi_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_user_scmi_pci_instance_init(Object *obj) +{ + VHostUserSCMIPCI *dev = VHOST_USER_SCMI_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_SCMI); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_scmi_pci_info = { + .base_name = TYPE_VHOST_USER_SCMI_PCI, + .non_transitional_name = "vhost-user-scmi-pci", + .instance_size = sizeof(VHostUserSCMIPCI), + .instance_init = vhost_user_scmi_pci_instance_init, + .class_init = vhost_user_scmi_pci_class_init, +}; + +static void vhost_user_scmi_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_scmi_pci_info); +} + +type_init(vhost_user_scmi_pci_register); diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c new file mode 100644 index 0000000000..10ead3c3e7 --- /dev/null +++ b/hw/virtio/vhost-user-scmi.c @@ -0,0 +1,287 @@ +/* + * Vhost-user scmi virtio device + * + * Copyright (c) 2021 Vincent Guittot <vincent.guittot@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-scmi.h" +#include 
"qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" + +static void vu_scmi_start(VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + int ret, i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&scmi->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, scmi->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + scmi->vhost_dev.acked_features = vdev->guest_features; + + ret = vhost_dev_start(&scmi->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost-user-scmi: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < scmi->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&scmi->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, scmi->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&scmi->vhost_dev, vdev); +} + +static void vu_scmi_stop(VirtIODevice *vdev) +{ + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&scmi->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, scmi->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&scmi->vhost_dev, vdev); +} + +static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (scmi->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + vu_scmi_start(vdev); + } else { + vu_scmi_stop(vdev); + } +} + +static uint64_t vu_scmi_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + virtio_add_feature(&requested_features, VIRTIO_SCMI_F_P2A_CHANNELS); + virtio_add_feature(&requested_features, VIRTIO_F_ACCESS_PLATFORM); + /* No feature bits used yet */ + return requested_features; +} + +static void vu_scmi_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. 
+ */ +} + +static void vu_scmi_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + + vhost_virtqueue_mask(&scmi->vhost_dev, vdev, idx, mask); +} + +static bool vu_scmi_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + + return vhost_virtqueue_pending(&scmi->vhost_dev, idx); +} + +static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserSCMI *scmi) +{ + vhost_user_cleanup(&scmi->vhost_user); + virtio_delete_queue(scmi->req_vq); + virtio_delete_queue(scmi->notif_vq); + virtio_cleanup(vdev); + g_free(scmi->vhost_dev.vqs); + scmi->vhost_dev.vqs = NULL; +} + +static int vu_scmi_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + + if (scmi->connected) { + return 0; + } + scmi->connected = true; + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_scmi_start(vdev); + } + + return 0; +} + +static void vu_scmi_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + + if (!scmi->connected) { + return; + } + scmi->connected = false; + + if (scmi->vhost_dev.started) { + vu_scmi_stop(vdev); + } +} + +static void vu_scmi_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev); + + switch (event) { + case CHR_EVENT_OPENED: + if (vu_scmi_connect(dev) < 0) { + qemu_chr_fe_disconnect(&scmi->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + vu_scmi_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vu_scmi_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCMI *scmi = VHOST_USER_SCMI(dev); + int ret; + + if (!scmi->chardev.chr) { + 
error_setg(errp, "missing chardev"); + return; + } + + if (!vhost_user_init(&scmi->vhost_user, &scmi->chardev, errp)) { + return; + } + + virtio_init(vdev, "vhost-user-scmi", VIRTIO_ID_SCMI, 0); + + scmi->vhost_dev.nvqs = 2; + scmi->req_vq = virtio_add_queue(vdev, 16, vu_scmi_handle_output); + scmi->notif_vq = virtio_add_queue(vdev, 16, vu_scmi_handle_output); + scmi->vhost_dev.vqs = g_new0(struct vhost_virtqueue, scmi->vhost_dev.nvqs); + + ret = vhost_dev_init(&scmi->vhost_dev, &scmi->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + do_vhost_user_cleanup(vdev, scmi); + } + + qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL, + dev, NULL, true); +} + +static void vu_scmi_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCMI *scmi = VHOST_USER_SCMI(dev); + + /* This will stop vhost backend if appropriate. */ + vu_scmi_set_status(vdev, 0); + vhost_dev_cleanup(&scmi->vhost_dev); + do_vhost_user_cleanup(vdev, scmi); +} + +static const VMStateDescription vu_scmi_vmstate = { + .name = "vhost-user-scmi", + .unmigratable = 1, +}; + +static Property vu_scmi_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_scmi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_scmi_properties); + dc->vmsd = &vu_scmi_vmstate; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = vu_scmi_device_realize; + vdc->unrealize = vu_scmi_device_unrealize; + vdc->get_features = vu_scmi_get_features; + vdc->set_status = vu_scmi_set_status; + vdc->guest_notifier_mask = vu_scmi_guest_notifier_mask; + vdc->guest_notifier_pending = vu_scmi_guest_notifier_pending; +} + +static const TypeInfo vu_scmi_info = { + .name = TYPE_VHOST_USER_SCMI, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserSCMI), + 
.class_init = vu_scmi_class_init, +}; + +static void vu_scmi_register_types(void) +{ + type_register_static(&vu_scmi_info); +} + +type_init(vu_scmi_register_types) diff --git a/include/hw/virtio/vhost-user-scmi.h b/include/hw/virtio/vhost-user-scmi.h new file mode 100644 index 0000000000..e9a022add7 --- /dev/null +++ b/include/hw/virtio/vhost-user-scmi.h @@ -0,0 +1,36 @@ +/* + * Vhost-user scmi virtio device + * + * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _QEMU_VHOST_USER_SCMI_H +#define _QEMU_VHOST_USER_SCMI_H + +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" + +/* Device implements some SCMI notifications, or delayed responses. */ +#define VIRTIO_SCMI_F_P2A_CHANNELS 0 + +/* Definitions from virtio-scmi specifications */ +#define VHOST_USER_SCMI_MAX_QUEUES 2 + +#define TYPE_VHOST_USER_SCMI "vhost-user-scmi-device" +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCMI, VHOST_USER_SCMI) + +struct VHostUserSCMI { + + VirtIODevice parent; + CharBackend chardev; + struct vhost_virtqueue *vhost_vq; + struct vhost_dev vhost_dev; + VhostUserState vhost_user; + VirtQueue *req_vq; + VirtQueue *notif_vq; + bool connected; +}; + +#endif /* _QEMU_VHOST_USER_SCMI_H */ diff --git a/tools/meson.build b/tools/meson.build index 46977af84f..9c85348293 100644 --- a/tools/meson.build +++ b/tools/meson.build @@ -11,3 +11,11 @@ have_virtiofsd = get_option('virtiofsd') \ if have_virtiofsd subdir('virtiofsd') endif + +have_virtioscmi= (have_system and + have_tools and + 'CONFIG_LINUX' in config_host) + +if have_virtioscmi + subdir('vhost-user-scmi') +endif diff --git a/tools/vhost-user-scmi/50-qemu-scmi.json.in b/tools/vhost-user-scmi/50-qemu-scmi.json.in new file mode 100644 index 0000000000..fb5a6b0a22 --- /dev/null +++ b/tools/vhost-user-scmi/50-qemu-scmi.json.in @@ -0,0 +1,5 @@ +{ + "description": "QEMU vhost-user-scmi", + "type": "bridge", + "binary": "@libexecdir@/vhost-user-scmi" +} diff --git 
a/tools/vhost-user-scmi/main.c b/tools/vhost-user-scmi/main.c new file mode 100644 index 0000000000..24e52b0090 --- /dev/null +++ b/tools/vhost-user-scmi/main.c @@ -0,0 +1,529 @@ +/* + * VIRTIO SCMI Bridge via vhost-user + * + * Copyright (c) 2021 Vincent Guittot <vincent.guittot@linaro.org> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#define G_LOG_DOMAIN "vhost-user-scmi" +#define G_LOG_USE_STRUCTURED 1 + +#include <glib.h> +#include <gio/gunixsocketaddress.h> +#include <glib-unix.h> +#include <stdio.h> +#include <stdbool.h> +#include <inttypes.h> + +#include "subprojects/libvhost-user/libvhost-user-glib.h" +#include "subprojects/libvhost-user/libvhost-user.h" + +/* Device implements some SCMI notifications, or delayed responses. */ +#define VIRTIO_SCMI_F_P2A_CHANNELS 0 +/* Definitions from virtio-scmi specifications */ +#define VHOST_USER_SCMI_MAX_QUEUES 2 + +/* Status */ +#define VIRTIO_SCMI_MSG_OK 0 +#define VIRTIO_SCMI_MSG_ERR 1 + +/** + * struct virtio_scmi_request - the virtio scmi message request header + * @hdr: the controlled device's address + * @data: used to pad to full dword + */ +struct virtio_scmi_request { + __virtio32 hdr; + __u8 data[]; +}; + +struct virtio_scmi_response { + __virtio32 hdr; + __virtio32 status; + __u8 data[]; +}; + +struct virtio_scmi_notification { + __virtio32 hdr; + __u8 data[]; +}; + +/* vhost-user-scmi definitions */ + +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + const typeof(((type *) 0)->member) *__mptr = (ptr); \ + (type *) ((char *) __mptr - offsetof(type, member));}) +#endif + +typedef struct { + VugDev dev; + gchar *path; + GSocket *socket ; + GThread *thread; + VuVirtqElement *elem[VHOST_USER_SCMI_MAX_QUEUES]; + bool guest; + bool started; +} VuChnl; + +typedef struct { + VuChnl vm_dev; + VuChnl be_dev; + GMainLoop *loop; +} VuScmi; + +static gboolean print_cap, notification, verbose, debug; +static gchar *socket_path_vm; +static gchar *socket_path_be; + +static GOptionEntry 
options[] = { + { "socket-path-vm", 'v', 0, G_OPTION_ARG_FILENAME, &socket_path_vm, + "Location of vhost-user Unix domain socket for guest vm", + "PATH" }, + { "socket-path-be", 'g', 0, G_OPTION_ARG_FILENAME, &socket_path_be, + "Location of vhost-user Unix domain socket for server vm", + "PATH" }, + { "notif-only", 'n', 0, G_OPTION_ARG_NONE, ¬ification, + "Only forward virtqueue notification", NULL}, + { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &print_cap, + "Output to stdout the backend capabilities in JSON format and exit", + NULL}, + { "verbose", 'v', 0, G_OPTION_ARG_NONE, &verbose, + "Be more verbose in output", NULL}, + { "debug", 'v', 0, G_OPTION_ARG_NONE, &debug, + "Enable debug output", NULL}, + { NULL } +}; + +/* Debug helpers */ +static void fmt_bytes(GString *s, uint8_t *bytes, int len) +{ + int32_t i; + for (i = 0; i < len; i++) { + if (i && i % 16 == 0) { + g_string_append_c(s, '\n'); + } + g_string_append_printf(s, "%02x ", bytes[i]); + } +} + +static void vscmi_dump_msg(struct virtio_scmi_request *msg, size_t len) +{ + g_autoptr(GString) s = g_string_new("\n"); + + g_string_append_printf(s, "hdr: %x\n", msg->hdr); + + fmt_bytes(s, (uint8_t *)msg->data, len-sizeof(msg->hdr)); + + g_info("%s: %s", __func__, s->str); +} + +static uint8_t vscmi_xfer(VuDev *dev, struct virtio_scmi_request *msg, size_t len) +{ + if (debug) { + vscmi_dump_msg(msg, len); + } + + return VIRTIO_SCMI_MSG_OK; +} + +/* + * vscmi_queue_set_started: set vq handler + * + */ +static void vscmi_copy_req(VuVirtqElement *src, VuVirtqElement *dst) +{ + memcpy(dst->in_sg[0].iov_base, src->out_sg[0].iov_base, src->out_sg[0].iov_len); + dst->in_sg[0].iov_len = src->out_sg[0].iov_len; + + dst->out_sg[0].iov_len = src->in_sg[0].iov_len; + + return; +} + +static int vscmi_notify_dst(int qidx, VuChnl *src_chnl, VuChnl *dst_chnl) +{ + VuVirtq *dst_vq; + + g_info("%s: path %s idx %d", __func__, src_chnl->path, qidx); + + if (qidx >= src_chnl->dev.parent.max_queues) { + g_info("%s: path %s 
not started yet", __func__, src_chnl->path); + return 0; + } + + if (!dst_chnl->started) + return 0; + + if (qidx >= dst_chnl->dev.parent.max_queues) { + g_info("%s: path %s not started yet", __func__, dst_chnl->path); + return 0; + } + dst_vq = vu_get_queue(&dst_chnl->dev.parent, qidx); + + g_info("%s: notify path %s idx %d", __func__, dst_chnl->path, qidx); + vu_queue_notify(&dst_chnl->dev.parent, dst_vq); + + return 1; +} + + +static int vscmi_forward_buffer(int qidx, VuChnl *src_chnl, VuChnl *dst_chnl) +{ + VuVirtq *dst_vq, *src_vq; + struct virtio_scmi_request *out_hdr; + struct virtio_scmi_response *in_hdr; + size_t out_hdr_len, in_hdr_len; + VuVirtqElement *elem; + + +// g_info("%s: path %s idx %d", __func__, src_chnl->path, qidx); + + if (qidx >= src_chnl->dev.parent.max_queues) { +// g_info("%s: path %s not started yet", __func__, src_chnl->path); + return 0; + } + + /* a request is already pending */ + if (src_chnl->elem[qidx]) { +// g_info("%s: path %s element pending", __func__, src_chnl->path); + return 1; + } + + src_vq = vu_get_queue(&src_chnl->dev.parent, qidx); + elem = vu_queue_pop(&src_chnl->dev.parent, src_vq, sizeof(VuVirtqElement)); + + /* No element available */ + if (!elem) { +// g_info("%s: path %s no new elements", __func__, src_chnl->path); + return 0; + } + + g_info("%s: path %s:%d got elements (in %d, out %d)", __func__, + src_chnl->path, qidx, elem->in_num, elem->out_num); + + src_chnl->elem[qidx] = elem; + + /* destination not ready */ + if (!dst_chnl->elem[qidx]) { +// g_info("%s: path %s no available elements", __func__, dst_chnl->path); + return 1; + } + + if (elem->out_num) { + out_hdr = elem->out_sg[0].iov_base; + out_hdr_len = elem->out_sg[0].iov_len; + g_info("%s: path %s sent OUT size %lu @ %p", __func__, src_chnl->path, out_hdr_len, out_hdr); + if (debug) + vscmi_xfer(&src_chnl->dev.parent, out_hdr, out_hdr_len); + vscmi_copy_req(src_chnl->elem[qidx], dst_chnl->elem[qidx]); +// g_info("%s: path %s copied IN elem size %lu", 
__func__, dst_chnl->path, dst_chnl->elem[qidx]->in_sg[0].iov_len); + } else + out_hdr_len = 0; + + if (elem->in_num) { + in_hdr = elem->in_sg[0].iov_base; + in_hdr_len = elem->out_sg[0].iov_len; + g_info("%s: path %s sent IN size %lu @ %p", __func__, src_chnl->path, in_hdr_len, in_hdr); + } else + in_hdr_len = 0; + + dst_vq = vu_get_queue(&dst_chnl->dev.parent, qidx); + vu_queue_push(&dst_chnl->dev.parent, dst_vq, dst_chnl->elem[qidx], out_hdr_len); + + dst_chnl->elem[qidx] = NULL; + + vu_queue_notify(&dst_chnl->dev.parent, dst_vq); + + return 1; +} + +static void vscmi_handle_ctrl(VuDev *dev, int qidx) +{ + VuChnl *dst_chnl, *src_chnl = container_of(dev, VuChnl, dev.parent); + VuScmi *scmi; + + if (src_chnl->guest) { + scmi = container_of(src_chnl, VuScmi, vm_dev); + dst_chnl = &scmi->be_dev; + } else { + scmi = container_of(src_chnl, VuScmi, be_dev); + dst_chnl = &scmi->vm_dev; + } + + g_info("%s: path %s idx %d", __func__, src_chnl->path, qidx); + + if (notification) + vscmi_notify_dst(qidx, src_chnl, dst_chnl); + else { + for (;;) { + if (!vscmi_forward_buffer(qidx, src_chnl, dst_chnl)) + break; + + if (!vscmi_forward_buffer(qidx, dst_chnl, src_chnl)) + break; + } + } +} + +/* Virtio helpers */ + +/* + * vscmi_get_features: return device features + * + */ +static uint64_t vscmi_get_features(VuDev *dev) +{ + uint64_t features = 1ull << VIRTIO_SCMI_F_P2A_CHANNELS; + features |= 1ull << VIRTIO_F_ACCESS_PLATFORM; + VuChnl *chnl = container_of(dev, VuChnl, dev.parent); + g_autoptr(GString) s = g_string_new(" "); + + g_string_append_printf(s, " 0x%" PRIx64 "", features); + g_info("%s: path %s features: %s", __func__, chnl->path, s->str); + + return features; +} + +/* + * vscmi_set_features: features set by driver + * + */ +static void vscmi_set_features(VuDev *dev, uint64_t features) +{ + if (verbose && features) { + VuChnl *chnl = container_of(dev, VuChnl, dev.parent); + g_autoptr(GString) s = g_string_new(" "); + + g_string_append_printf(s, " 0x%" PRIx64 "", 
features); + g_info("%s: path %s features: %s", __func__, chnl->path, s->str); + } +} + +/* + * vscmi_queue_set_started: set vq handler + * + */ +static void +vscmi_queue_set_started(VuDev *dev, int qidx, bool started) +{ + VuChnl *chnl = container_of(dev, VuChnl, dev.parent); + VuVirtq *vq = vu_get_queue(dev, qidx); + + g_info("%s: path %s idx %d:%d", __func__, chnl->path, qidx, started); + + chnl->started = started; + + vu_set_queue_handler(dev, vq, started ? vscmi_handle_ctrl : NULL); +} + +/* + * vscmi_process_msg: process messages of vhost-user interface + * + */ +static int wait_socket(VuChnl *device_ctx); +static void vscmi_destroy_channels(VuChnl *device_ctx); + +static int vscmi_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply) +{ + VuChnl *chnl = container_of(dev, VuChnl, dev.parent); + + if (msg->request == VHOST_USER_NONE) { + g_info("%s: path %s VHOST_USER_NONE", __func__, chnl->path); + vscmi_destroy_channels(chnl); + wait_socket(chnl); + return 1; + } + + if (debug) { + g_info("%s: path %s : request %d", __func__, chnl->path, msg->request); + } + + return 0; +} + +static const VuDevIface vuiface = { + .set_features = vscmi_set_features, + .get_features = vscmi_get_features, + .queue_set_started = vscmi_queue_set_started, + .process_msg = vscmi_process_msg, +}; + +static gboolean hangup(gpointer user_data) +{ + GMainLoop *loop = (GMainLoop *) user_data; + g_info("%s: caught hangup/quit signal, quitting main loop", __func__); + g_main_loop_quit(loop); + return true; +} + +static void vscmi_panic(VuDev *dev, const char *msg) +{ + g_info("%s\n", __func__); + g_critical("%s\n", msg); + exit(EXIT_FAILURE); +} + +/* Print vhost-user.json backend program capabilities */ +static void print_capabilities(void) +{ + g_info("%s\n", __func__); + printf("{\n"); + printf(" \"type\": \"scmi\"\n"); + printf("}\n"); +} + +static void vscmi_destroy_channels(VuChnl *device_ctx) +{ + int i; + + g_info("%s: %s", __func__, device_ctx->path); + if (device_ctx->socket 
!= NULL) { + vug_deinit(&device_ctx->dev); + g_socket_close(device_ctx->socket, NULL); + g_object_unref(device_ctx->socket); + device_ctx->socket = NULL; + } + + for (i = 0; i < VHOST_USER_SCMI_MAX_QUEUES; i++) + device_ctx->elem[i] = NULL; + + unlink(device_ctx->path); +} + +static void vscmi_destroy(VuScmi *scmi) +{ + g_info("%s\n", __func__); + vscmi_destroy_channels(&scmi->vm_dev); + vscmi_destroy_channels(&scmi->be_dev); +} + +static int vscmi_init_channels(VuChnl *device_ctx) +{ + GError *error = NULL; + const gchar *socket_path = (const gchar *) device_ctx->path; + + /* + * Now create a vhost-user socket that we will receive messages + * on + */ + if (!socket_path) + return 0; + + g_autoptr(GSocketAddress) addr = g_unix_socket_address_new(socket_path); + g_autoptr(GSocket) bind_socket = g_socket_new(G_SOCKET_FAMILY_UNIX, G_SOCKET_TYPE_STREAM, + G_SOCKET_PROTOCOL_DEFAULT, &error); + + if (!g_socket_bind(bind_socket, addr, false, &error)) { + g_printerr("Failed to bind to socket at %s (%s).\n", + socket_path, error->message); + return VIRTIO_SCMI_MSG_ERR; + } + + if (!g_socket_listen(bind_socket, &error)) { + g_printerr("Failed to listen on socket %s (%s).\n", + socket_path, error->message); + return VIRTIO_SCMI_MSG_ERR; + } + + g_message("awaiting connection to %s", socket_path); + device_ctx->socket = g_socket_accept(bind_socket, NULL, &error); + if (!device_ctx->socket) { + g_printerr("Failed to accept on socket %s (%s).\n", + socket_path, error->message); + return VIRTIO_SCMI_MSG_ERR; + } + g_message("got a connection to %s", socket_path); + + if (!vug_init(&device_ctx->dev, VHOST_USER_SCMI_MAX_QUEUES, g_socket_get_fd(device_ctx->socket), + vscmi_panic, &vuiface)) { + g_printerr("Failed to initialize libvhost-user-glib.\n"); + return VIRTIO_SCMI_MSG_ERR; + } + + return VIRTIO_SCMI_MSG_OK; +} + +static gpointer server_wait_vm_thread(gpointer data) +{ + VuChnl *device_ctx = (VuChnl *)data; + + vscmi_init_channels(device_ctx); + + return NULL; +} + +static 
int wait_socket(VuChnl *device_ctx) +{ + + device_ctx->thread = g_thread_new(device_ctx->path, server_wait_vm_thread, device_ctx); + + return VIRTIO_SCMI_MSG_OK; +} + +int main(int argc, char *argv[]) +{ + GError *error = NULL; + GOptionContext *context; + g_autoptr(GSocket) socket = NULL; + VuScmi scmi = {0}; + + context = g_option_context_new("vhost-user emulation of SCMI device"); + g_option_context_add_main_entries(context, options, "vhost-user-scmi"); + if (!g_option_context_parse(context, &argc, &argv, &error)) { + g_printerr("option parsing failed: %s\n", error->message); + exit(1); + } + + if (print_cap) { + print_capabilities(); + exit(0); + } + + if (!socket_path_vm || !socket_path_be) { + g_printerr("Please specify --socket-path for both vm and be\n"); + exit(EXIT_FAILURE); + } + + if (verbose) { + g_log_set_handler(NULL, G_LOG_LEVEL_MASK, g_log_default_handler, NULL); + g_setenv("G_MESSAGES_DEBUG", "all", true); + } else { + g_log_set_handler(NULL, G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL | + G_LOG_LEVEL_ERROR, g_log_default_handler, NULL); + } + /* + * Now create a vhost-user sockets that we will receive messages + * on. Once we have our handler set up we can enter the glib main + * loop. + */ + scmi.be_dev.path = socket_path_be; + scmi.be_dev.guest = false; + scmi.be_dev.started = false; + + scmi.vm_dev.path = socket_path_vm; + scmi.vm_dev.guest = true; + scmi.vm_dev.started = false; + + wait_socket(&scmi.be_dev); + wait_socket(&scmi.vm_dev); + + /* + * Create the main loop first so all the various sources can be + * added. As well as catching signals we need to ensure vug_init + * can add it's GSource watches. 
+ */ + + scmi.loop = g_main_loop_new(NULL, FALSE); + /* catch exit signals */ + g_unix_signal_add(SIGHUP, hangup, scmi.loop); + g_unix_signal_add(SIGINT, hangup, scmi.loop); + + g_message("entering main loop, awaiting messages"); + g_main_loop_run(scmi.loop); + g_message("finished main loop, cleaning up"); + + g_main_loop_unref(scmi.loop); + vscmi_destroy(&scmi); +} diff --git a/tools/vhost-user-scmi/meson.build b/tools/vhost-user-scmi/meson.build new file mode 100644 index 0000000000..7bb466c71d --- /dev/null +++ b/tools/vhost-user-scmi/meson.build @@ -0,0 +1,10 @@ +executable('vhost-user-scmi', files( + 'main.c'), + dependencies: [qemuutil, glib, gio], + install: true, + install_dir: get_option('libexecdir')) + +configure_file(input: '50-qemu-scmi.json.in', + output: '50-qemu-scmi.json', + configuration: config_host, + install_dir: qemu_datadir / 'vhost-user') |