path: root/accel
author    Peter Maydell <peter.maydell@linaro.org>  2022-07-20 22:33:35 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2022-07-20 22:33:35 +0100
commit    fe16c833fdb74642c296fa96a472e39229cd4351 (patch)
tree      ba1d92b35dc4eb227d72a0c4839c06d4d375cc76 /accel
parent    8ec4bc3c8c09366a9e4859de7c0a1860911e8424 (diff)
parent    db727a14108b5f7ee1273f94e8ccce428a646140 (diff)
Merge tag 'pull-migration-20220720c' of https://gitlab.com/dagrh/qemu into staging
Migration pull 2022-07-20

This replaces yesterday's pull and:
 a) Fixes some test build errors without TLS
 b) Reenables the zlib acceleration on s390 now that we have Ilya's fix

  Hyman's dirty page rate limit set
  Ilya's fix for zlib vs migration
  Peter's postcopy-preempt
  Cleanup from Dan
  zero-copy tidy ups from Leo
  multifd doc fix from Juan
  Revert disable of zlib acceleration on s390x

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Wed 20 Jul 2022 12:18:56 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7

* tag 'pull-migration-20220720c' of https://gitlab.com/dagrh/qemu: (30 commits)
  Revert "gitlab: disable accelerated zlib for s390x"
  migration: Avoid false-positive on non-supported scenarios for zero-copy-send
  multifd: Document the locking of MultiFD{Send/Recv}Params
  migration/multifd: Report to user when zerocopy not working
  Add dirty-sync-missed-zero-copy migration stat
  QIOChannelSocket: Fix zero-copy flush returning code 1 when nothing sent
  migration: remove unreachable code after reading data
  tests: Add postcopy preempt tests
  tests: Add postcopy tls recovery migration test
  tests: Add postcopy tls migration test
  tests: Move MigrateCommon upper
  migration: Respect postcopy request order in preemption mode
  migration: Enable TLS for preempt channel
  migration: Export tls-[creds|hostname|authz] params to cmdline too
  migration: Add helpers to detect TLS capability
  migration: Add property x-postcopy-preempt-break-huge
  migration: Create the postcopy preempt channel asynchronously
  migration: Postcopy recover with preempt enabled
  migration: Postcopy preemption enablement
  migration: Postcopy preemption preparation on channel creation
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r--  accel/kvm/kvm-all.c      46
-rw-r--r--  accel/stubs/kvm-stub.c    5

2 files changed, 41 insertions, 10 deletions
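The core of the accel/ change is a new CPUState * parameter threaded through the dirty-ring reap path: passing a specific vCPU reaps only that vCPU's ring, while passing NULL keeps the old reap-all-vCPUs behaviour. A minimal sketch of the resulting call contract, as established by the diff below (the comment text is a paraphrase, not the in-tree documentation):

    /* Reap dirty pages from one vCPU's ring, or from every vCPU's ring
     * if cpu is NULL.  Callers hold the BQL; slots_lock is taken
     * internally. */
    static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu);

    kvm_dirty_ring_reap(kvm_state, cpu);   /* dirtylimit: this vCPU only */
    kvm_dirty_ring_reap(kvm_state, NULL);  /* everywhere else: all vCPUs */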
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index ed8b6b896e..3187656570 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -45,6 +45,7 @@
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
+#include "sysemu/dirtylimit.h"
#include "hw/boards.h"
#include "monitor/stats.h"
@@ -477,6 +478,7 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
cpu->kvm_state = s;
cpu->vcpu_dirty = true;
cpu->dirty_pages = 0;
+ cpu->throttle_us_per_full = 0;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
@@ -757,17 +759,20 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
}
/* Must be with slots_lock held */
-static uint64_t kvm_dirty_ring_reap_locked(KVMState *s)
+static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
int ret;
- CPUState *cpu;
uint64_t total = 0;
int64_t stamp;
stamp = get_clock();
- CPU_FOREACH(cpu) {
- total += kvm_dirty_ring_reap_one(s, cpu);
+ if (cpu) {
+ total = kvm_dirty_ring_reap_one(s, cpu);
+ } else {
+ CPU_FOREACH(cpu) {
+ total += kvm_dirty_ring_reap_one(s, cpu);
+ }
}
if (total) {
@@ -788,7 +793,7 @@ static uint64_t kvm_dirty_ring_reap_locked(KVMState *s)
* Currently for simplicity, we must hold BQL before calling this. We can
* consider to drop the BQL if we're clear with all the race conditions.
*/
-static uint64_t kvm_dirty_ring_reap(KVMState *s)
+static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
uint64_t total;
@@ -808,7 +813,7 @@ static uint64_t kvm_dirty_ring_reap(KVMState *s)
* reset below.
*/
kvm_slots_lock();
- total = kvm_dirty_ring_reap_locked(s);
+ total = kvm_dirty_ring_reap_locked(s, cpu);
kvm_slots_unlock();
return total;
@@ -855,7 +860,7 @@ static void kvm_dirty_ring_flush(void)
* vcpus out in a synchronous way.
*/
kvm_cpu_synchronize_kick_all();
- kvm_dirty_ring_reap(kvm_state);
+ kvm_dirty_ring_reap(kvm_state, NULL);
trace_kvm_dirty_ring_flush(1);
}
@@ -1399,7 +1404,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
* Not easy. Let's cross the fingers until it's fixed.
*/
if (kvm_state->kvm_dirty_ring_size) {
- kvm_dirty_ring_reap_locked(kvm_state);
+ kvm_dirty_ring_reap_locked(kvm_state, NULL);
} else {
kvm_slot_get_dirty_log(kvm_state, mem);
}
@@ -1467,11 +1472,16 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
*/
sleep(1);
+ /* Keep sleeping so that the reaper does not interfere with dirtylimit */
+ if (dirtylimit_in_service()) {
+ continue;
+ }
+
trace_kvm_dirty_ring_reaper("wakeup");
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
qemu_mutex_lock_iothread();
- kvm_dirty_ring_reap(s);
+ kvm_dirty_ring_reap(s, NULL);
qemu_mutex_unlock_iothread();
r->reaper_iteration++;
@@ -2315,6 +2325,11 @@ static void query_stats_cb(StatsResultList **result, StatsTarget target,
strList *names, strList *targets, Error **errp);
static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
+uint32_t kvm_dirty_ring_size(void)
+{
+ return kvm_state->kvm_dirty_ring_size;
+}
+
static int kvm_init(MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -2967,8 +2982,19 @@ int kvm_cpu_exec(CPUState *cpu)
*/
trace_kvm_dirty_ring_full(cpu->cpu_index);
qemu_mutex_lock_iothread();
- kvm_dirty_ring_reap(kvm_state);
+ /*
+  * We throttle a vCPU by making it sleep once it exits the kernel
+  * due to a full dirty ring. In the dirtylimit scenario, reaping
+  * all vCPUs after a single vCPU's dirty ring gets full would mean
+  * that sleep is missed, so reap only the vCPU whose ring is full.
+  */
+ if (dirtylimit_in_service()) {
+ kvm_dirty_ring_reap(kvm_state, cpu);
+ } else {
+ kvm_dirty_ring_reap(kvm_state, NULL);
+ }
qemu_mutex_unlock_iothread();
+ dirtylimit_vcpu_execute(cpu);
ret = 0;
break;
case KVM_EXIT_SYSTEM_EVENT:
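Taken together, the KVM_EXIT_DIRTY_RING_FULL hunk above gives dirtylimit a per-vCPU hook: only the vCPU whose ring filled is reaped, and dirtylimit_vcpu_execute() then throttles that vCPU. A rough sketch of what the throttle step amounts to, assuming it sleeps for the throttle_us_per_full interval initialised in kvm_init_vcpu() above (the real implementation lives outside this diff, in the dirtylimit code, and differs in detail):

    /* Hypothetical simplification; not the code added by this series. */
    static void dirtylimit_vcpu_execute_sketch(CPUState *cpu)
    {
        if (dirtylimit_in_service() && cpu->throttle_us_per_full) {
            usleep(cpu->throttle_us_per_full);  /* assumed sleep primitive */
        }
    }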
diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index 3345882d85..2ac5f9c036 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -148,3 +148,8 @@ bool kvm_dirty_ring_enabled(void)
{
return false;
}
+
+uint32_t kvm_dirty_ring_size(void)
+{
+ return 0;
+}
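The stub keeps builds without KVM linking: kvm_dirty_ring_size() returning 0 there simply means no dirty ring is available, matching a KVM build with the ring disabled. A hypothetical caller (not part of this diff) could therefore probe the feature unconditionally:

    /* Hypothetical usage, not from this series: treat a size of 0 as
     * "dirty ring absent" (stub build, or ring not enabled). */
    static bool dirty_ring_available(void)
    {
        return kvm_dirty_ring_size() != 0;
    }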