author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-30 15:36:45 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-30 15:36:45 -0700
commit    1bc87b00556e8f7ba30a1010471951c5b8f71114 (patch)
tree      e73c2d187e2dff0df97ed82e32b45e362b923117 /virt/kvm
parent    acff987d94cbdb4049f3706bed1f1792f8ef6837 (diff)
parent    f1c1da2bde712812a3e0f9a7a7ebe7a916a4b5f4 (diff)
Merge branch 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (75 commits)
  KVM: SVM: Keep intercepting task switching with NPT enabled
  KVM: s390: implement sigp external call
  KVM: s390: fix register setting
  KVM: s390: fix return value of kvm_arch_init_vm
  KVM: s390: check cpu_id prior to using it
  KVM: emulate lapic tsc deadline timer for guest
  x86: TSC deadline definitions
  KVM: Fix simultaneous NMIs
  KVM: x86 emulator: convert push %sreg/pop %sreg to direct decode
  KVM: x86 emulator: switch lds/les/lss/lfs/lgs to direct decode
  KVM: x86 emulator: streamline decode of segment registers
  KVM: x86 emulator: simplify OpMem64 decode
  KVM: x86 emulator: switch src decode to decode_operand()
  KVM: x86 emulator: qualify OpReg inhibit_byte_regs hack
  KVM: x86 emulator: switch OpImmUByte decode to decode_imm()
  KVM: x86 emulator: free up some flag bits near src, dst
  KVM: x86 emulator: switch src2 to generic decode_operand()
  KVM: x86 emulator: expand decode flags to 64 bits
  KVM: x86 emulator: split dst decode to a generic decode_operand()
  KVM: x86 emulator: move memop, memopp into emulation context
  ...
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/assigned-dev.c     62
-rw-r--r--  virt/kvm/coalesced_mmio.c  131
-rw-r--r--  virt/kvm/coalesced_mmio.h    7
-rw-r--r--  virt/kvm/eventfd.c           3
-rw-r--r--  virt/kvm/ioapic.c            3
-rw-r--r--  virt/kvm/kvm_main.c        112
6 files changed, 200 insertions(+), 118 deletions(-)
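
The thread running through most of these hunks is the I/O bus rework in kvm_main.c: instead of a flat devs[] array probed linearly on every exit, each device now registers an explicit (addr, len) guest-physical range, the bus keeps the ranges sorted, and lookup goes through bsearch(). What follows is a minimal userspace model of that idea, a sketch with illustrative names rather than the kernel code (which is in the kvm_main.c hunks below):

#include <stdio.h>
#include <stdlib.h>

struct io_range {
	unsigned long addr;
	int len;
	int dev_id;
};

/* Same ordering rule as the kvm_io_bus_sort_cmp() added below: a range
 * contained in another compares equal, which is what lets bsearch()
 * answer "which device covers this access?". */
static int range_cmp(const void *p1, const void *p2)
{
	const struct io_range *r1 = p1, *r2 = p2;

	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

int main(void)
{
	struct io_range bus[] = {
		{ 0xfee00000, 0x100, 1 },	/* some MMIO window */
		{ 0xfec00000, 0x100, 2 },	/* e.g. an IOAPIC-sized window */
	};
	int n = sizeof(bus) / sizeof(bus[0]);
	struct io_range key = { 0xfec00010, 4, 0 };
	struct io_range *hit;

	qsort(bus, n, sizeof(bus[0]), range_cmp);	/* registration sorts */
	hit = bsearch(&key, bus, n, sizeof(bus[0]), range_cmp);
	if (hit)
		printf("access at %#lx hits dev %d\n", key.addr, hit->dev_id);
	return 0;
}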
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index eaf3a50f976..3ad0925d23a 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -58,8 +58,6 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- u32 vector;
- int index;
if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
spin_lock(&assigned_dev->intx_lock);
@@ -68,31 +66,35 @@ static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
spin_unlock(&assigned_dev->intx_lock);
}
- if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
- index = find_index_from_host_irq(assigned_dev, irq);
- if (index >= 0) {
- vector = assigned_dev->
- guest_msix_entries[index].vector;
- kvm_set_irq(assigned_dev->kvm,
- assigned_dev->irq_source_id, vector, 1);
- }
- } else
+ kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+ assigned_dev->guest_irq, 1);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
+{
+ struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+ int index = find_index_from_host_irq(assigned_dev, irq);
+ u32 vector;
+
+ if (index >= 0) {
+ vector = assigned_dev->guest_msix_entries[index].vector;
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 1);
+ vector, 1);
+ }
return IRQ_HANDLED;
}
+#endif
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
- struct kvm_assigned_dev_kernel *dev;
-
- if (kian->gsi == -1)
- return;
-
- dev = container_of(kian, struct kvm_assigned_dev_kernel,
- ack_notifier);
+ struct kvm_assigned_dev_kernel *dev =
+ container_of(kian, struct kvm_assigned_dev_kernel,
+ ack_notifier);
kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
@@ -110,8 +112,9 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
static void deassign_guest_irq(struct kvm *kvm,
struct kvm_assigned_dev_kernel *assigned_dev)
{
- kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
- assigned_dev->ack_notifier.gsi = -1;
+ if (assigned_dev->ack_notifier.gsi != -1)
+ kvm_unregister_irq_ack_notifier(kvm,
+ &assigned_dev->ack_notifier);
kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
assigned_dev->guest_irq, 0);
@@ -143,7 +146,7 @@ static void deassign_host_irq(struct kvm *kvm,
for (i = 0; i < assigned_dev->entries_nr; i++)
free_irq(assigned_dev->host_msix_entries[i].vector,
- (void *)assigned_dev);
+ assigned_dev);
assigned_dev->entries_nr = 0;
kfree(assigned_dev->host_msix_entries);
@@ -153,7 +156,7 @@ static void deassign_host_irq(struct kvm *kvm,
/* Deal with MSI and INTx */
disable_irq(assigned_dev->host_irq);
- free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+ free_irq(assigned_dev->host_irq, assigned_dev);
if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
pci_disable_msi(assigned_dev->dev);
@@ -239,7 +242,7 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
* are going to be long delays in accepting, acking, etc.
*/
if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
- IRQF_ONESHOT, dev->irq_name, (void *)dev))
+ IRQF_ONESHOT, dev->irq_name, dev))
return -EIO;
return 0;
}
@@ -258,7 +261,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
dev->host_irq = dev->dev->irq;
if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
- 0, dev->irq_name, (void *)dev)) {
+ 0, dev->irq_name, dev)) {
pci_disable_msi(dev->dev);
return -EIO;
}
@@ -284,8 +287,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
for (i = 0; i < dev->entries_nr; i++) {
r = request_threaded_irq(dev->host_msix_entries[i].vector,
- NULL, kvm_assigned_dev_thread,
- 0, dev->irq_name, (void *)dev);
+ NULL, kvm_assigned_dev_thread_msix,
+ 0, dev->irq_name, dev);
if (r)
goto err;
}
@@ -293,7 +296,7 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
return 0;
err:
for (i -= 1; i >= 0; i--)
- free_irq(dev->host_msix_entries[i].vector, (void *)dev);
+ free_irq(dev->host_msix_entries[i].vector, dev);
pci_disable_msix(dev->dev);
return r;
}
@@ -406,7 +409,8 @@ static int assign_guest_irq(struct kvm *kvm,
if (!r) {
dev->irq_requested_type |= guest_irq_type;
- kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
+ if (dev->ack_notifier.gsi != -1)
+ kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
} else
kvm_free_irq_source_id(kvm, dev->irq_source_id);
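
The change above replaces one interrupt thread that re-discovered at run time whether it was handling INTx/MSI or MSI-X with two dedicated handlers, selected once at request_threaded_irq() time; the MSI-X path is also compiled out entirely when __KVM_HAVE_MSIX is unset. A toy userspace model of the split, with hypothetical names and printf standing in for kvm_set_irq():

#include <stdio.h>

struct assigned { int guest_irq; int guest_msix_vector[2]; };

/* INTx/MSI: the guest IRQ is fixed, no per-interrupt lookup needed */
static void thread_intx_msi(struct assigned *dev)
{
	printf("inject guest irq %d\n", dev->guest_irq);
}

/* MSI-X: map which host vector fired to the matching guest vector */
static void thread_msix(struct assigned *dev, int index)
{
	printf("inject guest vector %d\n", dev->guest_msix_vector[index]);
}

int main(void)
{
	struct assigned dev = { .guest_irq = 10, .guest_msix_vector = { 33, 34 } };

	thread_intx_msi(&dev);	/* what kvm_assigned_dev_thread now does */
	thread_msix(&dev, 1);	/* what kvm_assigned_dev_thread_msix does */
	return 0;
}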
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index fc8487564d1..a6ec206f36b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -24,10 +24,19 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
gpa_t addr, int len)
{
- struct kvm_coalesced_mmio_zone *zone;
+ /* is it in a batchable area ?
+ * (addr,len) is fully included in
+ * (zone->addr, zone->size)
+ */
+
+ return (dev->zone.addr <= addr &&
+ addr + len <= dev->zone.addr + dev->zone.size);
+}
+
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+{
struct kvm_coalesced_mmio_ring *ring;
unsigned avail;
- int i;
/* Are we able to batch it ? */
@@ -37,25 +46,12 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
*/
ring = dev->kvm->coalesced_mmio_ring;
avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
- if (avail < KVM_MAX_VCPUS) {
+ if (avail == 0) {
/* full */
return 0;
}
- /* is it in a batchable area ? */
-
- for (i = 0; i < dev->nb_zones; i++) {
- zone = &dev->zone[i];
-
- /* (addr,len) is fully included in
- * (zone->addr, zone->size)
- */
-
- if (zone->addr <= addr &&
- addr + len <= zone->addr + zone->size)
- return 1;
- }
- return 0;
+ return 1;
}
static int coalesced_mmio_write(struct kvm_io_device *this,
@@ -63,10 +59,16 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+
if (!coalesced_mmio_in_range(dev, addr, len))
return -EOPNOTSUPP;
- spin_lock(&dev->lock);
+ spin_lock(&dev->kvm->ring_lock);
+
+ if (!coalesced_mmio_has_room(dev)) {
+ spin_unlock(&dev->kvm->ring_lock);
+ return -EOPNOTSUPP;
+ }
/* copy data in first free entry of the ring */
@@ -75,7 +77,7 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
- spin_unlock(&dev->lock);
+ spin_unlock(&dev->kvm->ring_lock);
return 0;
}
@@ -83,6 +85,8 @@ static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+ list_del(&dev->list);
+
kfree(dev);
}
@@ -93,7 +97,6 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
- struct kvm_coalesced_mmio_dev *dev;
struct page *page;
int ret;
@@ -101,31 +104,18 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto out_err;
- kvm->coalesced_mmio_ring = page_address(page);
-
- ret = -ENOMEM;
- dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
- if (!dev)
- goto out_free_page;
- spin_lock_init(&dev->lock);
- kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
- dev->kvm = kvm;
- kvm->coalesced_mmio_dev = dev;
- mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
- mutex_unlock(&kvm->slots_lock);
- if (ret < 0)
- goto out_free_dev;
+ ret = 0;
+ kvm->coalesced_mmio_ring = page_address(page);
- return ret;
+ /*
+ * We're using this spinlock to sync access to the coalesced ring.
+ * The list doesn't need its own lock since device registration and
+ * unregistration should only happen when kvm->slots_lock is held.
+ */
+ spin_lock_init(&kvm->ring_lock);
+ INIT_LIST_HEAD(&kvm->coalesced_zones);
-out_free_dev:
- kvm->coalesced_mmio_dev = NULL;
- kfree(dev);
-out_free_page:
- kvm->coalesced_mmio_ring = NULL;
- __free_page(page);
out_err:
return ret;
}
@@ -139,51 +129,50 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+ int ret;
+ struct kvm_coalesced_mmio_dev *dev;
- if (dev == NULL)
- return -ENXIO;
+ dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+ dev->kvm = kvm;
+ dev->zone = *zone;
mutex_lock(&kvm->slots_lock);
- if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- mutex_unlock(&kvm->slots_lock);
- return -ENOBUFS;
- }
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
+ zone->size, &dev->dev);
+ if (ret < 0)
+ goto out_free_dev;
+ list_add_tail(&dev->list, &kvm->coalesced_zones);
+ mutex_unlock(&kvm->slots_lock);
- dev->zone[dev->nb_zones] = *zone;
- dev->nb_zones++;
+ return ret;
+out_free_dev:
mutex_unlock(&kvm->slots_lock);
+
+ kfree(dev);
+
- return 0;
+ return ret;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
- int i;
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
- struct kvm_coalesced_mmio_zone *z;
-
- if (dev == NULL)
- return -ENXIO;
+ struct kvm_coalesced_mmio_dev *dev, *tmp;
mutex_lock(&kvm->slots_lock);
- i = dev->nb_zones;
- while (i) {
- z = &dev->zone[i - 1];
-
- /* unregister all zones
- * included in (zone->addr, zone->size)
- */
-
- if (zone->addr <= z->addr &&
- z->addr + z->size <= zone->addr + zone->size) {
- dev->nb_zones--;
- *z = dev->zone[dev->nb_zones];
+ list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+ if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ kvm_iodevice_destructor(&dev->dev);
}
- i--;
- }
mutex_unlock(&kvm->slots_lock);
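
Two separable changes land above: every coalesced zone becomes its own kvm_io_device, and the ring-room test moves out of the range check into coalesced_mmio_write(), under the new kvm->ring_lock. Because the room check is now serialized with the insertion, a single free slot is enough; the old check ran before any lock was taken, so it had to leave KVM_MAX_VCPUS slots of headroom for racing vcpus. A standalone sketch of the ring arithmetic, with MMIO_MAX standing in for KVM_COALESCED_MMIO_MAX:

#include <assert.h>

#define MMIO_MAX 64	/* stand-in for KVM_COALESCED_MMIO_MAX */

struct ring { unsigned first, last; };

/* One slot stays empty so a full ring is distinguishable from an
 * empty one; unsigned wraparound makes the subtraction safe. */
static int has_room(const struct ring *r)
{
	unsigned avail = (r->first - r->last - 1) % MMIO_MAX;

	return avail != 0;
}

int main(void)
{
	struct ring r = { .first = 0, .last = 0 };

	assert(has_room(&r));		/* empty ring: 63 usable slots */
	r.last = MMIO_MAX - 1;
	assert(!has_room(&r));		/* only the guard slot remains */
	return 0;
}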
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 8a5959e3535..b280c20444d 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -12,14 +12,13 @@
#ifdef CONFIG_KVM_MMIO
-#define KVM_COALESCED_MMIO_ZONE_MAX 100
+#include <linux/list.h>
struct kvm_coalesced_mmio_dev {
+ struct list_head list;
struct kvm_io_device dev;
struct kvm *kvm;
- spinlock_t lock;
- int nb_zones;
- struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
+ struct kvm_coalesced_mmio_zone zone;
};
int kvm_coalesced_mmio_init(struct kvm *kvm);
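
With the fixed zone[KVM_COALESCED_MMIO_ZONE_MAX] array gone, each device carries one zone and a list_head, and unregistration walks kvm->coalesced_zones with list_for_each_entry_safe(), whose extra cursor permits freeing the current entry mid-walk. A hand-rolled userspace equivalent of that safe-removal walk (the kernel uses <linux/list.h>; names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct zone_dev {
	struct zone_dev *next;
	unsigned long addr, size;
};

/* A pointer-to-pointer cursor lets us unlink the node we are standing
 * on without a separate 'prev' variable: the same problem
 * list_for_each_entry_safe() solves for the kernel's doubly linked
 * lists. */
static void unregister_zones(struct zone_dev **head,
			     unsigned long addr, unsigned long size)
{
	struct zone_dev **pp = head, *cur;

	while ((cur = *pp) != NULL) {
		if (cur->addr <= addr &&
		    addr + size <= cur->addr + cur->size) {
			*pp = cur->next;	/* unlink ... */
			free(cur);		/* ... then free safely */
		} else {
			pp = &cur->next;
		}
	}
}

int main(void)
{
	struct zone_dev *z = malloc(sizeof(*z));
	struct zone_dev *head = z;

	*z = (struct zone_dev){ .next = NULL, .addr = 0x1000, .size = 0x100 };
	unregister_zones(&head, 0x1000, 0x10);	/* falls inside z's zone */
	printf("list is %s\n", head ? "non-empty" : "empty");
	return 0;
}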
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 73358d256fa..f59c1e8de7a 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -586,7 +586,8 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm_iodevice_init(&p->dev, &ioeventfd_ops);
- ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
+ ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
+ &p->dev);
if (ret < 0)
goto unlock_fail;
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 8df1ca104a7..3eed61eb486 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -394,7 +394,8 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
+ IOAPIC_MEM_LENGTH, &ioapic->dev);
mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kvm->arch.vioapic = NULL;
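
The eventfd.c and ioapic.c hunks above are mechanical adaptations to the new registration signature: kvm_io_bus_register_dev() now takes the device's guest-physical base and length so the bus can index it by range, as the kvm_main.c hunks below implement.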
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index aefdda390f5..d9cfb782cb8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -47,6 +47,8 @@
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/bsearch.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -2391,24 +2393,92 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
int i;
for (i = 0; i < bus->dev_count; i++) {
- struct kvm_io_device *pos = bus->devs[i];
+ struct kvm_io_device *pos = bus->range[i].dev;
kvm_iodevice_destructor(pos);
}
kfree(bus);
}
+int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+{
+ const struct kvm_io_range *r1 = p1;
+ const struct kvm_io_range *r2 = p2;
+
+ if (r1->addr < r2->addr)
+ return -1;
+ if (r1->addr + r1->len > r2->addr + r2->len)
+ return 1;
+ return 0;
+}
+
+int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
+ gpa_t addr, int len)
+{
+ if (bus->dev_count == NR_IOBUS_DEVS)
+ return -ENOSPC;
+
+ bus->range[bus->dev_count++] = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ .dev = dev,
+ };
+
+ sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
+ kvm_io_bus_sort_cmp, NULL);
+
+ return 0;
+}
+
+int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
+ gpa_t addr, int len)
+{
+ struct kvm_io_range *range, key;
+ int off;
+
+ key = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
+
+ range = bsearch(&key, bus->range, bus->dev_count,
+ sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
+ if (range == NULL)
+ return -ENOENT;
+
+ off = range - bus->range;
+
+ while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+ off--;
+
+ return off;
+}
+
/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
- int i;
+ int idx;
struct kvm_io_bus *bus;
+ struct kvm_io_range range;
+
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- for (i = 0; i < bus->dev_count; i++)
- if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
+ idx = kvm_io_bus_get_first_dev(bus, addr, len);
+ if (idx < 0)
+ return -EOPNOTSUPP;
+
+ while (idx < bus->dev_count &&
+ kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
return 0;
+ idx++;
+ }
+
return -EOPNOTSUPP;
}
@@ -2416,19 +2486,33 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
- int i;
+ int idx;
struct kvm_io_bus *bus;
+ struct kvm_io_range range;
+
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- for (i = 0; i < bus->dev_count; i++)
- if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
+ idx = kvm_io_bus_get_first_dev(bus, addr, len);
+ if (idx < 0)
+ return -EOPNOTSUPP;
+
+ while (idx < bus->dev_count &&
+ kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
return 0;
+ idx++;
+ }
+
return -EOPNOTSUPP;
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, struct kvm_io_device *dev)
{
struct kvm_io_bus *new_bus, *bus;
@@ -2440,7 +2524,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
if (!new_bus)
return -ENOMEM;
memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
- new_bus->devs[new_bus->dev_count++] = dev;
+ kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
@@ -2464,9 +2548,13 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
r = -ENOENT;
for (i = 0; i < new_bus->dev_count; i++)
- if (new_bus->devs[i] == dev) {
+ if (new_bus->range[i].dev == dev) {
r = 0;
- new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
+ new_bus->dev_count--;
+ new_bus->range[i] = new_bus->range[new_bus->dev_count];
+ sort(new_bus->range, new_bus->dev_count,
+ sizeof(struct kvm_io_range),
+ kvm_io_bus_sort_cmp, NULL);
break;
}
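
A subtlety worth noting in the lookup code above: kvm_io_bus_sort_cmp() treats a range contained in another as equal, so bsearch() may land anywhere inside a run of entries matching the key. That is why kvm_io_bus_get_first_dev() rewinds to the first match, and why kvm_io_bus_write()/kvm_io_bus_read() then probe forward until some device accepts. A compilable userspace model of that dispatch loop (the devices and their behavior are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct io_range {
	unsigned long addr;
	int len;
	int (*write)(void);	/* returns 0 if the device handled it */
};

static int cmp(const void *p1, const void *p2)
{
	const struct io_range *r1 = p1, *r2 = p2;

	if (r1->addr < r2->addr)
		return -1;
	if (r1->addr + r1->len > r2->addr + r2->len)
		return 1;
	return 0;
}

static int refuse(void) { return -1; }			/* device declines */
static int accept(void) { puts("handled"); return 0; }

int main(void)
{
	/* two overlapping zones: both compare equal to the key */
	struct io_range bus[] = {
		{ 0x1000, 8, refuse },
		{ 0x1000, 8, accept },
	};
	struct io_range key = { 0x1000, 4, NULL };
	int n = 2, idx;
	struct io_range *hit;

	hit = bsearch(&key, bus, n, sizeof(bus[0]), cmp);
	if (!hit)
		return 1;
	idx = hit - bus;
	while (idx > 0 && cmp(&key, &bus[idx - 1]) == 0)
		idx--;				/* rewind to the first match */
	while (idx < n && cmp(&key, &bus[idx]) == 0 && bus[idx].write())
		idx++;				/* probe until one accepts */
	return 0;
}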