From: Dr. David Alan Gilbert (git)
Subject: [PULL 14/16] multifd: multifd_send_sync_main now returns negative on error
Date: Tue, 10 May 2022 09:33:53 +0100
From: Leonardo Bras <leobras@redhat.com>
Even though multifd_send_sync_main() currently emits error_reports, its
callers don't really check it before continuing.

Change multifd_send_sync_main() to return -1 on error and 0 on success.
Also change all its callers to make use of this change and possibly fail
earlier.

(This change is important to the next patch on the multifd zero copy
implementation, to make sure an error in the zero-copy flush does not go
unnoticed.)
Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-Id: <20220507015759.840466-6-leobras@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
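Editor's note, not part of the patch: for readers skimming the diff below,
this is a minimal sketch of the error-propagation pattern the patch
establishes in the callers. The surrounding function is hypothetical;
multifd_send_sync_main(), qemu_put_be64(), qemu_fflush() and
RAM_SAVE_FLAG_EOS are the real identifiers used in the diff.

    /* Illustrative only: how a caller consumes the new return value. */
    static int example_caller(QEMUFile *f)
    {
        int ret;

        ret = multifd_send_sync_main(f);  /* now returns 0 or -1 */
        if (ret < 0) {
            return ret;  /* fail early instead of silently continuing */
        }

        /* Only emit the end-of-section marker once the sync succeeded. */
        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
        qemu_fflush(f);
        return 0;
    }
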
migration/multifd.c | 10 ++++++----
migration/multifd.h | 2 +-
migration/ram.c | 29 ++++++++++++++++++++++-------
3 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/migration/multifd.c b/migration/multifd.c
index 2a8c8570c3..15fb668e64 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -566,17 +566,17 @@ void multifd_save_cleanup(void)
multifd_send_state = NULL;
}
-void multifd_send_sync_main(QEMUFile *f)
+int multifd_send_sync_main(QEMUFile *f)
{
int i;
if (!migrate_use_multifd()) {
- return;
+ return 0;
}
if (multifd_send_state->pages->num) {
if (multifd_send_pages(f) < 0) {
error_report("%s: multifd_send_pages fail", __func__);
- return;
+ return -1;
}
}
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -589,7 +589,7 @@ void multifd_send_sync_main(QEMUFile *f)
if (p->quit) {
error_report("%s: channel %d has already quit", __func__, i);
qemu_mutex_unlock(&p->mutex);
- return;
+ return -1;
}
p->packet_num = multifd_send_state->packet_num++;
@@ -608,6 +608,8 @@ void multifd_send_sync_main(QEMUFile *f)
qemu_sem_wait(&p->sem_sync);
}
trace_multifd_send_sync_main(multifd_send_state->packet_num);
+
+ return 0;
}
static void *multifd_send_thread(void *opaque)
diff --git a/migration/multifd.h b/migration/multifd.h
index 7d0effcb03..bcf5992945 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -20,7 +20,7 @@ int multifd_load_cleanup(Error **errp);
bool multifd_recv_all_channels_created(void);
bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
void multifd_recv_sync_main(void);
-void multifd_send_sync_main(QEMUFile *f);
+int multifd_send_sync_main(QEMUFile *f);
int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset);
/* Multifd Compression flags */
diff --git a/migration/ram.c b/migration/ram.c
index a2489a2699..5f5e37f64d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2909,6 +2909,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
{
RAMState **rsp = opaque;
RAMBlock *block;
+ int ret;
if (compress_threads_save_setup()) {
return -1;
@@ -2943,7 +2944,11 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
- multifd_send_sync_main(f);
+ ret = multifd_send_sync_main(f);
+ if (ret < 0) {
+ return ret;
+ }
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
@@ -3052,7 +3057,11 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
out:
if (ret >= 0
&& migration_is_setup_or_active(migrate_get_current()->state)) {
- multifd_send_sync_main(rs->f);
+ ret = multifd_send_sync_main(rs->f);
+ if (ret < 0) {
+ return ret;
+ }
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
ram_transferred_add(8);
@@ -3112,13 +3121,19 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
}
- if (ret >= 0) {
- multifd_send_sync_main(rs->f);
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- qemu_fflush(f);
+ if (ret < 0) {
+ return ret;
}
- return ret;
+ ret = multifd_send_sync_main(rs->f);
+ if (ret < 0) {
+ return ret;
+ }
+
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+ qemu_fflush(f);
+
+ return 0;
}
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
--
2.36.0