Diffstat (limited to 'drivers/gpu/drm/virtio/virtgpu_vq.c')
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c  285
1 file changed, 217 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 21c2de81f3e3..feddc29b3f2c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
@@ -38,30 +39,11 @@
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
-{
- int handle;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->resource_idr_lock);
- handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
- spin_unlock(&vgdev->resource_idr_lock);
- idr_preload_end();
- *resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
- spin_lock(&vgdev->resource_idr_lock);
- idr_remove(&vgdev->resource_idr, id);
- spin_unlock(&vgdev->resource_idr_lock);
-}
-
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
+
schedule_work(&vgdev->ctrlq.dequeue_work);
}
@@ -69,6 +51,7 @@ void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
+
schedule_work(&vgdev->cursorq.dequeue_work);
}
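
Both ack callbacks run in virtqueue interrupt context, which is why they only schedule work and leave the real dequeue to the workqueue handlers. For context, a hedged sketch of how such callbacks are usually registered when the driver finds its two queues (the actual setup lives in virtgpu_kms.c and is not part of this diff; example_init_vqs is illustrative):

/* Sketch only: mirrors the usual virtio_find_vqs() wiring, not this diff. */
static void example_init_vqs(struct virtio_gpu_device *vgdev)
{
        static vq_callback_t *callbacks[] = {
                virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
        };
        static const char * const names[] = { "control", "cursor" };
        struct virtqueue *vqs[2];

        /* Callbacks fire in IRQ context; they must not sleep, hence the
         * schedule_work() indirection seen above. */
        if (virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL))
                return; /* error handling elided in this sketch */

        vgdev->ctrlq.vq = vqs[0];
        vgdev->cursorq.vq = vqs[1];
}
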
@@ -96,10 +79,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
if (!vbuf)
return ERR_PTR(-ENOMEM);
- memset(vbuf, 0, VBUFFER_SIZE);
BUG_ON(size > MAX_INLINE_CMD_SIZE);
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
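
Switching to kmem_cache_zalloc() drops the separate memset(); the vbufs cache is created with VBUFFER_SIZE-sized objects, so zeroing the whole slab object is equivalent to the memset of VBUFFER_SIZE bytes that was removed. A minimal sketch of the equivalence (example_zalloc is illustrative):

#include <linux/slab.h>

/* Sketch: kmem_cache_zalloc() is kmem_cache_alloc() plus __GFP_ZERO, so an
 * explicit memset() of the freshly allocated object becomes redundant. */
static void *example_zalloc(struct kmem_cache *cache)
{
        return kmem_cache_alloc(cache, GFP_KERNEL | __GFP_ZERO);
        /* equivalent: return kmem_cache_zalloc(cache, GFP_KERNEL); */
}
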
@@ -211,8 +193,19 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
- DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+
+ if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+ if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ struct virtio_gpu_ctrl_hdr *cmd;
+ cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
+ DRM_ERROR("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
+ } else
+ DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+ }
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
u64 f = le64_to_cpu(resp->fence_id);
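
The new error branch keys off the virtio-gpu response numbering: success codes and error codes live in disjoint ranges, so a single comparison against VIRTIO_GPU_RESP_ERR_UNSPEC distinguishes them, and errors are now logged together with the type of the command that triggered them. A small standalone sketch of the same classification (example_resp_is_error is illustrative, not driver code):

#include <linux/virtio_gpu.h>

/* Sketch: classify a response header the same way the hunk above does. */
static bool example_resp_is_error(const struct virtio_gpu_ctrl_hdr *resp)
{
        /* Error codes start at VIRTIO_GPU_RESP_ERR_UNSPEC; anything below
         * that (other than OK_NODATA) is an OK-with-data response and only
         * merits a debug message. */
        return le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC;
}
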
@@ -272,7 +265,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
return -ENODEV;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt+incnt] = &vcmd;
+ sgs[outcnt + incnt] = &vcmd;
outcnt++;
if (vbuf->data_size) {
@@ -295,6 +288,9 @@ retry:
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
+ trace_virtio_gpu_cmd_queue(vq,
+ (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
virtqueue_kick(vq);
}
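
The trace point is emitted just before the virtqueue is kicked, i.e. only on the success path of the add/retry loop. The surrounding loop is only partly visible in this hunk; it follows the usual virtio pattern of trying to add the scatterlists, and, if the ring is full, dropping the lock, waiting for the host to consume entries, and retrying. A hedged sketch of that shape with the driver-specific details elided (example_queue and its parameters are illustrative):

/* Sketch of the add/retry/kick shape around this hunk. */
static void example_queue(struct virtqueue *vq, spinlock_t *qlock,
                          wait_queue_head_t *ack_queue,
                          struct scatterlist **sgs, int outcnt, int incnt,
                          void *data)
{
        int ret;

        spin_lock(qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, data, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                /* Ring full: wait for the ack callback to free slots. */
                spin_unlock(qlock);
                wait_event(*ack_queue, vq->num_free);
                spin_lock(qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }
        spin_unlock(qlock);
}
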
@@ -317,7 +313,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
int rc;
@@ -370,6 +366,9 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ trace_virtio_gpu_cmd_queue(vq,
+ (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
virtqueue_kick(vq);
}
@@ -381,14 +380,14 @@ retry:
}
/* just create gem objects for userspace and long lived objects,
- just use dma_alloced pages for the queue objects? */
+ * just use dma_alloced pages for the queue objects?
+ */
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- uint32_t format,
- uint32_t width,
- uint32_t height)
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -397,12 +396,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
- cmd_p->resource_id = cpu_to_le32(resource_id);
- cmd_p->format = cpu_to_le32(format);
- cmd_p->width = cpu_to_le32(width);
- cmd_p->height = cpu_to_le32(height);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ bo->created = true;
}
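
Resource creation now takes the object plus a virtio_gpu_object_params bundle and goes through the fenced queue helper, so creation can be ordered against later transfers; the function also sets bo->created, which virtio_gpu_object_attach() below checks. A hedged sketch of what a 2D caller might look like after this change (the real call sites live elsewhere in the driver and may differ; example_create_2d and the literal sizes are illustrative):

/* Illustrative caller only; field names follow the hunk above. */
static void example_create_2d(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *bo,
                              struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object_params params = { 0 };

        params.format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
        params.width  = 1024;
        params.height = 768;

        /* Queues RESOURCE_CREATE_2D with bo->hw_res_handle and marks
         * bo->created once the command is on the ring. */
        virtio_gpu_cmd_create_resource(vgdev, bo, &params, fence);
}
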
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -420,8 +420,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -432,7 +433,7 @@ void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
cmd_p->resource_id = cpu_to_le32(resource_id);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -479,19 +480,26 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint64_t offset,
+ struct virtio_gpu_object *bo,
+ uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+ if (use_dma_api)
+ dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+ bo->pages->sgl, bo->pages->nents,
+ DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->r.width = width;
cmd_p->r.height = height;
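
The new dma_sync_sg_for_device() call matters when the virtio transport does not carry the legacy "bypass the IOMMU" quirk: in that case the backing pages are mapped through the DMA API (see virtio_gpu_object_attach() further down), so CPU writes have to be handed off to the device before the host is told to read the resource. A generic hedged sketch of that streaming-DMA ownership hand-off (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: generic streaming-DMA ownership hand-off for a mapped sg list.
 * 'nents' must be the same value that was passed to dma_map_sg(). */
static void example_cpu_fill_then_give_to_device(struct device *dev,
                                                 struct scatterlist *sgl,
                                                 int nents)
{
        /* Take the buffer back for the CPU before writing it. */
        dma_sync_sg_for_cpu(dev, sgl, nents, DMA_TO_DEVICE);

        /* ... CPU fills the pages here ... */

        /* Hand ownership to the device before asking the host to read. */
        dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
}
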
@@ -506,7 +514,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_mem_entry *ents,
uint32_t nents,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_attach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -592,9 +600,47 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
}
}
spin_unlock(&vgdev->display_info_lock);
- wake_up(&vgdev->resp_wq);
+ wake_up_all(&vgdev->resp_wq);
}
+static int virtio_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct virtio_gpu_resp_edid *resp = data;
+ size_t start = block * EDID_LENGTH;
+
+ if (start + len > le32_to_cpu(resp->size))
+ return -1;
+ memcpy(buf, resp->edid + start, len);
+ return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_cmd_get_edid *cmd =
+ (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+ struct virtio_gpu_resp_edid *resp =
+ (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+ uint32_t scanout = le32_to_cpu(cmd->scanout);
+ struct virtio_gpu_output *output;
+ struct edid *new_edid, *old_edid;
+
+ if (scanout >= vgdev->num_scanouts)
+ return;
+ output = vgdev->outputs + scanout;
+
+ new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+ drm_connector_update_edid_property(&output->conn, new_edid);
+
+ spin_lock(&vgdev->display_info_lock);
+ old_edid = output->edid;
+ output->edid = new_edid;
+ spin_unlock(&vgdev->display_info_lock);
+
+ kfree(old_edid);
+ wake_up(&vgdev->resp_wq);
+}
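
After this callback runs, the parsed EDID is attached to the connector property and cached in output->edid under display_info_lock. A typical consumer would be the connector's .get_modes hook; the sketch below is illustrative only (the driver's real hook lives in virtgpu_display.c and this fragment assumes the driver's own headers), showing how the cached EDID would feed drm_add_edid_modes():

/* Illustrative consumer of the cached EDID; not part of this diff. */
static int example_conn_get_modes(struct drm_connector *connector,
                                  struct virtio_gpu_output *output)
{
        int count = 0;

        if (output->edid)
                count = drm_add_edid_modes(connector, output->edid);

        return count;
}
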
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
@@ -648,11 +694,14 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_get_capset *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- int max_size = vgdev->capsets[idx].max_size;
+ int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
+ struct virtio_gpu_drv_cap_cache *search_ent;
void *resp_buf;
- if (idx > vgdev->num_capsets)
+ *cache_p = NULL;
+
+ if (idx >= vgdev->num_capsets)
return -EINVAL;
if (version > vgdev->capsets[idx].max_version)
@@ -662,6 +711,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
if (!cache_ent)
return -ENOMEM;
+ max_size = vgdev->capsets[idx].max_size;
cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
if (!cache_ent->caps_cache) {
kfree(cache_ent);
@@ -681,9 +731,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
atomic_set(&cache_ent->is_valid, 0);
cache_ent->size = max_size;
spin_lock(&vgdev->display_info_lock);
- list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+ /* Search while under lock in case it was added by another task. */
+ list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
+ if (search_ent->id == vgdev->capsets[idx].id &&
+ search_ent->version == version) {
+ *cache_p = search_ent;
+ break;
+ }
+ }
+ if (!*cache_p)
+ list_add_tail(&cache_ent->head, &vgdev->cap_cache);
spin_unlock(&vgdev->display_info_lock);
+ if (*cache_p) {
+ /* Entry was found, so free everything that was just created. */
+ kfree(resp_buf);
+ kfree(cache_ent->caps_cache);
+ kfree(cache_ent);
+ return 0;
+ }
+
cmd_p = virtio_gpu_alloc_cmd_resp
(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
sizeof(struct virtio_gpu_resp_capset) + max_size,
@@ -697,6 +764,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
return 0;
}
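
The capset change fixes a check-then-insert race: two tasks asking for the same capset could both add a cache entry and both issue a command. The new code allocates optimistically, re-checks the list while holding display_info_lock, and throws its allocation away if another task won. The same pattern, distilled into a driver-agnostic hedged sketch (struct example_ent and example_get_or_add are illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ent {
        struct list_head head;
        u32 id, version;
};

static struct example_ent *example_get_or_add(struct list_head *cache,
                                              spinlock_t *lock,
                                              u32 id, u32 version)
{
        struct example_ent *ent, *search, *found = NULL;

        /* Allocate before taking the lock so the critical section stays short. */
        ent = kzalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return NULL;
        ent->id = id;
        ent->version = version;

        spin_lock(lock);
        list_for_each_entry(search, cache, head) {
                if (search->id == id && search->version == version) {
                        found = search;
                        break;
                }
        }
        if (!found)
                list_add_tail(&ent->head, cache);
        spin_unlock(lock);

        if (found) {
                /* Lost the race: discard our allocation, reuse the winner's. */
                kfree(ent);
                return found;
        }
        return ent;
}
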
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_cmd_get_edid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+ int scanout;
+
+ if (WARN_ON(!vgdev->has_edid))
+ return -EINVAL;
+
+ for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+ resp_buf);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+ cmd_p->scanout = cpu_to_le32(scanout);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
+
+ return 0;
+}
+
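
virtio_gpu_cmd_get_edids() fires one GET_EDID request per scanout and returns immediately; the replies land asynchronously in virtio_gpu_cmd_get_edid_cb() above. The WARN_ON documents that it may only be called once the device has negotiated EDID support. A hedged sketch of how that is typically wired up at init time (the driver's actual probe path is in virtgpu_kms.c and may differ; example_init_edid is illustrative):

/* Illustrative init-time wiring; not part of this diff. */
static void example_init_edid(struct virtio_gpu_device *vgdev)
{
        if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
                vgdev->has_edid = true;

        if (vgdev->has_edid)
                virtio_gpu_cmd_get_edids(vgdev);
}
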
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name)
{
@@ -709,8 +804,8 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
+ strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
+ cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -763,8 +858,9 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -772,28 +868,46 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
- *cmd_p = *rc_3d;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
- cmd_p->hdr.flags = 0;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
+
+ cmd_p->target = cpu_to_le32(params->target);
+ cmd_p->bind = cpu_to_le32(params->bind);
+ cmd_p->depth = cpu_to_le32(params->depth);
+ cmd_p->array_size = cpu_to_le32(params->array_size);
+ cmd_p->last_level = cpu_to_le32(params->last_level);
+ cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+ cmd_p->flags = cpu_to_le32(params->flags);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ struct virtio_gpu_object *bo,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+ if (use_dma_api)
+ dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+ bo->pages->sgl, bo->pages->nents,
+ DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->box = *box;
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -805,7 +919,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -825,7 +939,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence)
+ uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -845,42 +959,77 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si;
+ int si, nents;
+
+ if (WARN_ON_ONCE(!obj->created))
+ return -EINVAL;
if (!obj->pages) {
int ret;
+
ret = virtio_gpu_object_get_sg_table(vgdev, obj);
if (ret)
return ret;
}
+ if (use_dma_api) {
+ obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
+ nents = obj->mapped;
+ } else {
+ nents = obj->pages->nents;
+ }
+
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(obj->pages->nents,
- sizeof(struct virtio_gpu_mem_entry),
+ ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
}
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
- ents[si].addr = cpu_to_le64(sg_phys(sg));
+ for_each_sg(obj->pages->sgl, sg, nents, si) {
+ ents[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
}
- virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
- ents, obj->pages->nents,
+ virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
+ ents, nents,
fence);
- obj->hw_res_handle = resource_id;
return 0;
}
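
virtio_gpu_object_attach() now refuses to run before the resource exists on the host (the WARN_ON_ONCE(!obj->created)), and when the DMA API is in use it advertises the post-mapping nents, since an IOMMU may have coalesced segments. A hedged sketch of the expected caller ordering (example_create_and_attach is illustrative; the real sequencing lives in the object and ioctl code):

/* Illustrative ordering only: create the host resource, then attach the
 * guest backing pages, both against the same fence. */
static int example_create_and_attach(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_object *bo,
                                     struct virtio_gpu_object_params *params,
                                     struct virtio_gpu_fence *fence)
{
        virtio_gpu_cmd_create_resource(vgdev, bo, params, fence); /* sets bo->created */
        return virtio_gpu_object_attach(vgdev, bo, fence);        /* needs bo->created */
}
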
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+ if (use_dma_api && obj->mapped) {
+ struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
+ /* detach backing and wait for the host to process it ... */
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
+ dma_fence_wait(&fence->f, true);
+ dma_fence_put(&fence->f);
+
+ /* ... then tear down iommu mappings */
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->mapped,
+ DMA_TO_DEVICE);
+ obj->mapped = 0;
+ } else {
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+ }
+}
+
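
In the DMA-API branch the ordering is the point: the IOMMU mapping must outlive any host access, so the detach command is fenced and the driver waits on that fence before calling dma_unmap_sg(). A generic hedged sketch of the same fence-wait-then-unmap shape (example_fenced_unmap is illustrative):

#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>

/* Sketch: wait for a device-side operation to complete before unmapping. */
static void example_fenced_unmap(struct device *dev, struct dma_fence *f,
                                 struct scatterlist *sgl, int mapped_nents)
{
        /* Interruptible wait; a production path might prefer a bounded
         * dma_fence_wait_timeout() instead. */
        dma_fence_wait(f, true);
        dma_fence_put(f);

        dma_unmap_sg(dev, sgl, mapped_nents, DMA_TO_DEVICE);
}
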
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{