2 * qemu_migration.c: QEMU migration handling
4 * Copyright (C) 2006-2014 Red Hat, Inc.
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library. If not, see
18 * <http://www.gnu.org/licenses/>.
25 #include <sys/socket.h>
28 # include <gnutls/gnutls.h>
29 # include <gnutls/x509.h>
34 #include "qemu_migration.h"
35 #include "qemu_monitor.h"
36 #include "qemu_domain.h"
37 #include "qemu_process.h"
38 #include "qemu_capabilities.h"
39 #include "qemu_command.h"
40 #include "qemu_cgroup.h"
41 #include "qemu_hotplug.h"
43 #include "domain_audit.h"
48 #include "datatypes.h"
52 #include "locking/domain_lock.h"
53 #include "rpc/virnetsocket.h"
54 #include "virstoragefile.h"
57 #include "virstring.h"
58 #include "virtypedparam.h"
60 #define VIR_FROM_THIS VIR_FROM_QEMU
62 VIR_LOG_INIT("qemu.qemu_migration");
64 VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
/* Bit positions identifying each optional payload a migration cookie can
 * carry.  QEMU_MIGRATION_COOKIE_FLAG_LAST is the iteration sentinel used
 * when formatting <feature name='...'/> elements. */
77 enum qemuMigrationCookieFlags {
78 QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
79 QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
80 QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
81 QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
82 QEMU_MIGRATION_COOKIE_FLAG_NBD,
84 QEMU_MIGRATION_COOKIE_FLAG_LAST
/* Enum <-> string table for the flag names; the string forms appear in the
 * cookie XML's <feature name='...'/> elements. */
87 VIR_ENUM_DECL(qemuMigrationCookieFlag);
88 VIR_ENUM_IMPL(qemuMigrationCookieFlag,
89 QEMU_MIGRATION_COOKIE_FLAG_LAST,
/* Bitmask values derived from the flag positions above; these are the
 * values OR'd into mig->flags and mig->flagsMandatory. */
96 enum qemuMigrationCookieFeatures {
97 QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
98 QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
99 QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
100 QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
101 QEMU_MIGRATION_COOKIE_NBD = (1 << QEMU_MIGRATION_COOKIE_FLAG_NBD),
/* Graphics data carried in the cookie: connection parameters the client
 * needs in order to perform a seamless graphics reconnect to the target. */
104 typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
105 typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr;
106 struct _qemuMigrationCookieGraphics {
/* Per-interface virtualport data for one NIC. */
114 typedef struct _qemuMigrationCookieNetData qemuMigrationCookieNetData;
115 typedef qemuMigrationCookieNetData *qemuMigrationCookieNetDataPtr;
116 struct _qemuMigrationCookieNetData {
117 int vporttype; /* enum virNetDevVPortProfile */
120 * Array of pointers to saved data. Each VIF will have its own
126 typedef struct _qemuMigrationCookieNetwork qemuMigrationCookieNetwork;
127 typedef qemuMigrationCookieNetwork *qemuMigrationCookieNetworkPtr;
128 struct _qemuMigrationCookieNetwork {
129 /* How many virtual NICs are we saving data for? */
132 qemuMigrationCookieNetDataPtr net;
/* NBD server info for storage migration via the built-in NBD server. */
135 typedef struct _qemuMigrationCookieNBD qemuMigrationCookieNBD;
136 typedef qemuMigrationCookieNBD *qemuMigrationCookieNBDPtr;
137 struct _qemuMigrationCookieNBD {
138 int port; /* on which port does NBD server listen for incoming data */
/* The migration cookie itself: identity of host and guest plus the
 * optional sections selected by 'flags'.  'flagsMandatory' is the subset
 * the peer must understand or refuse the migration. */
141 typedef struct _qemuMigrationCookie qemuMigrationCookie;
142 typedef qemuMigrationCookie *qemuMigrationCookiePtr;
143 struct _qemuMigrationCookie {
145 unsigned int flagsMandatory;
147 /* Host properties */
148 unsigned char localHostuuid[VIR_UUID_BUFLEN];
149 unsigned char remoteHostuuid[VIR_UUID_BUFLEN];
151 char *remoteHostname;
153 /* Guest properties */
154 unsigned char uuid[VIR_UUID_BUFLEN];
157 /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
161 /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
162 qemuMigrationCookieGraphicsPtr graphics;
164 /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */
165 virDomainDefPtr persistent;
167 /* If (flags & QEMU_MIGRATION_COOKIE_NETWORK) */
168 qemuMigrationCookieNetworkPtr network;
170 /* If (flags & QEMU_MIGRATION_COOKIE_NBD) */
171 qemuMigrationCookieNBDPtr nbd;
/* Free the strings owned by a graphics cookie entry (and, presumably, the
 * entry itself — confirm against the full body). */
174 static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
178 VIR_FREE(grap->listen);
179 VIR_FREE(grap->tlsSubject);
/* Free the per-interface portdata strings, the net array, and (presumably)
 * the network cookie struct itself. */
185 qemuMigrationCookieNetworkFree(qemuMigrationCookieNetworkPtr network)
193 for (i = 0; i < network->nnets; i++)
194 VIR_FREE(network->net[i].portdata);
196 VIR_FREE(network->net);
/* Free a migration cookie and all the optional sections it owns.
 * NOTE(review): mig->persistent is not freed in the visible lines — it is
 * apparently a borrowed reference (see qemuMigrationCookieAddPersistent,
 * which stores dom->newDef without copying); confirm before changing. */
201 static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
206 qemuMigrationCookieGraphicsFree(mig->graphics);
207 qemuMigrationCookieNetworkFree(mig->network);
209 VIR_FREE(mig->localHostname);
210 VIR_FREE(mig->remoteHostname);
212 VIR_FREE(mig->lockState);
213 VIR_FREE(mig->lockDriver);
/* Read <certdir>/server-cert.pem (at most 8k) and return the certificate's
 * subject DN as a newly allocated string; NULL on failure with an error
 * reported.  Caller frees the result. */
221 qemuDomainExtractTLSSubject(const char *certdir)
223 char *certfile = NULL;
224 char *subject = NULL;
225 char *pemdata = NULL;
226 gnutls_datum_t pemdatum;
227 gnutls_x509_crt_t cert;
231 if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0)
234 if (virFileReadAll(certfile, 8192, &pemdata) < 0) {
235 virReportError(VIR_ERR_INTERNAL_ERROR,
236 _("unable to read server cert %s"), certfile);
240 ret = gnutls_x509_crt_init(&cert);
242 virReportError(VIR_ERR_INTERNAL_ERROR,
243 _("cannot initialize cert object: %s"),
244 gnutls_strerror(ret));
/* Wrap the PEM text in the datum gnutls expects for import. */
248 pemdatum.data = (unsigned char *)pemdata;
249 pemdatum.size = strlen(pemdata);
251 ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM);
253 virReportError(VIR_ERR_INTERNAL_ERROR,
254 _("cannot load cert data from %s: %s"),
255 certfile, gnutls_strerror(ret));
/* +1 for the NUL terminator appended below. */
260 if (VIR_ALLOC_N(subject, subjectlen+1) < 0)
263 gnutls_x509_crt_get_dn(cert, subject, &subjectlen);
264 subject[subjectlen] = '\0';
/* Build a graphics cookie entry for one graphics device: record type and
 * port(s), the listen address (falling back to the driver config's
 * vncListen/spiceListen when the device has none), and — when TLS is
 * configured — the server certificate's subject so the client can verify
 * the target.  Returns NULL on failure. */
278 static qemuMigrationCookieGraphicsPtr
279 qemuMigrationCookieGraphicsAlloc(virQEMUDriverPtr driver,
280 virDomainGraphicsDefPtr def)
282 qemuMigrationCookieGraphicsPtr mig = NULL;
283 const char *listenAddr;
284 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
286 if (VIR_ALLOC(mig) < 0)
289 mig->type = def->type;
290 if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
291 mig->port = def->data.vnc.port;
292 listenAddr = virDomainGraphicsListenGetAddress(def, 0);
294 listenAddr = cfg->vncListen;
298 !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->vncTLSx509certdir)))
/* SPICE branch: spice has a separate TLS port in addition to the
 * plain port. */
302 mig->port = def->data.spice.port;
304 mig->tlsPort = def->data.spice.tlsPort;
307 listenAddr = virDomainGraphicsListenGetAddress(def, 0);
309 listenAddr = cfg->spiceListen;
313 !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->spiceTLSx509certdir)))
317 if (VIR_STRDUP(mig->listen, listenAddr) < 0)
324 qemuMigrationCookieGraphicsFree(mig);
/* Collect per-NIC virtualport data for every interface of the domain.
 * Currently only Open vSwitch ports have portdata worth transferring;
 * other vport types are recorded by type alone.  Returns NULL on
 * failure (allocation or OVS query error). */
330 static qemuMigrationCookieNetworkPtr
331 qemuMigrationCookieNetworkAlloc(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
334 qemuMigrationCookieNetworkPtr mig;
337 if (VIR_ALLOC(mig) < 0)
340 mig->nnets = def->nnets;
342 if (VIR_ALLOC_N(mig->net, def->nnets) <0)
345 for (i = 0; i < def->nnets; i++) {
346 virDomainNetDefPtr netptr;
347 virNetDevVPortProfilePtr vport;
349 netptr = def->nets[i];
/* vport may be NULL for interfaces with no virtualport profile;
 * presumably guarded by a check on a line not visible here. */
350 vport = virDomainNetGetActualVirtPortProfile(netptr);
353 mig->net[i].vporttype = vport->virtPortType;
355 switch (vport->virtPortType) {
356 case VIR_NETDEV_VPORT_PROFILE_NONE:
357 case VIR_NETDEV_VPORT_PROFILE_8021QBG:
358 case VIR_NETDEV_VPORT_PROFILE_8021QBH:
/* Only OVS ports carry migratable per-port state. */
360 case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
361 if (virNetDevOpenvswitchGetMigrateData(&mig->net[i].portdata,
362 netptr->ifname) != 0) {
363 virReportSystemError(VIR_ERR_INTERNAL_ERROR,
364 _("Unable to run command to get OVS port data for "
365 "interface %s"), netptr->ifname);
377 qemuMigrationCookieNetworkFree(mig);
/* Allocate a fresh cookie pre-filled with the guest identity (name — the
 * original pre-migration name if priv->origname is set — and UUID) plus
 * the local host's hostname and host UUID.  Returns NULL on failure. */
381 static qemuMigrationCookiePtr
382 qemuMigrationCookieNew(virDomainObjPtr dom)
384 qemuDomainObjPrivatePtr priv = dom->privateData;
385 qemuMigrationCookiePtr mig = NULL;
388 if (VIR_ALLOC(mig) < 0)
/* Prefer the original name when the domain was renamed on incoming
 * migration. */
392 name = priv->origname;
394 name = dom->def->name;
395 if (VIR_STRDUP(mig->name, name) < 0)
397 memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);
399 if (!(mig->localHostname = virGetHostname()))
401 if (virGetHostUUID(mig->localHostuuid) < 0) {
402 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
403 _("Unable to obtain host UUID"));
410 qemuMigrationCookieFree(mig);
/* Add graphics relocation data to the cookie.  Only SPICE devices are
 * considered (see the type check below); errors out if graphics data was
 * already added. */
416 qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
417 virQEMUDriverPtr driver,
422 if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) {
423 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
424 _("Migration graphics data already present"));
428 for (i = 0; i < dom->def->ngraphics; i++) {
429 if (dom->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
430 if (!(mig->graphics =
431 qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[i])))
433 mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS;
/* Add lock-manager state to the cookie and mark it mandatory: the target
 * must understand it or refuse the migration.  For a paused domain the
 * state captured at pause time (priv->lockState) is reused; otherwise the
 * lock manager is queried live. */
443 qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
444 virQEMUDriverPtr driver,
447 qemuDomainObjPrivatePtr priv = dom->privateData;
449 if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
450 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
451 _("Migration lockstate data already present"));
455 if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
456 if (VIR_STRDUP(mig->lockState, priv->lockState) < 0)
459 if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
463 if (VIR_STRDUP(mig->lockDriver, virLockManagerPluginGetName(driver->lockManager)) < 0) {
/* Don't leave a half-populated lockstate behind on failure. */
464 VIR_FREE(mig->lockState);
468 mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
469 mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
/* Add the persistent (inactive) domain definition to the cookie and mark
 * it mandatory.  Note: dom->newDef is stored as a borrowed pointer, not a
 * copy — the cookie must not outlive the domain object's newDef. */
476 qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
479 if (mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) {
480 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
481 _("Migration persistent data already present"));
488 mig->persistent = dom->newDef;
489 mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
490 mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT;
/* Add per-NIC virtualport data to the cookie (only when the domain has at
 * least one interface); errors out if network data was already added. */
496 qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig,
497 virQEMUDriverPtr driver,
500 if (mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) {
501 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
502 _("Network migration data already present"));
506 if (dom->def->nnets > 0) {
507 mig->network = qemuMigrationCookieNetworkAlloc(driver, dom->def);
510 mig->flags |= QEMU_MIGRATION_COOKIE_NETWORK;
/* Record the NBD server port (priv->nbdPort) in the cookie so the peer
 * knows where the storage-migration NBD server listens. */
518 qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig,
519 virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
522 qemuDomainObjPrivatePtr priv = vm->privateData;
524 /* It is not a bug if there already is a NBD data */
526 VIR_ALLOC(mig->nbd) < 0)
529 mig->nbd->port = priv->nbdPort;
530 mig->flags |= QEMU_MIGRATION_COOKIE_NBD;
/* Format one <graphics> element into the cookie XML.  The element is
 * written with a nested <cert> child (and explicit close tag) when a TLS
 * subject is present, otherwise self-closed. */
536 static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
537 qemuMigrationCookieGraphicsPtr grap)
539 virBufferAsprintf(buf, "<graphics type='%s' port='%d' listen='%s'",
540 virDomainGraphicsTypeToString(grap->type),
541 grap->port, grap->listen);
/* tlsPort is only meaningful for SPICE. */
542 if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
543 virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort);
544 if (grap->tlsSubject) {
545 virBufferAddLit(buf, ">\n");
546 virBufferAdjustIndent(buf, 2);
547 virBufferEscapeString(buf, "<cert info='subject' value='%s'/>\n", grap->tlsSubject);
548 virBufferAdjustIndent(buf, -2);
549 virBufferAddLit(buf, "</graphics>\n");
551 virBufferAddLit(buf, "/>\n");
/* Format the <network> section of the cookie: one <interface> per NIC that
 * has a virtualport type set.  The <network> wrapper appears inside the
 * loop — presumably emitted only once for the first qualifying interface
 * (guard not visible here); likewise the trailing close is presumably
 * conditional on something having been emitted. */
557 qemuMigrationCookieNetworkXMLFormat(virBufferPtr buf,
558 qemuMigrationCookieNetworkPtr optr)
563 for (i = 0; i < optr->nnets; i++) {
564 /* If optr->net[i].vporttype is not set, there is nothing to transfer */
565 if (optr->net[i].vporttype != VIR_NETDEV_VPORT_PROFILE_NONE) {
567 virBufferAddLit(buf, "<network>\n");
568 virBufferAdjustIndent(buf, 2);
571 virBufferAsprintf(buf, "<interface index='%zu' vporttype='%s'",
572 i, virNetDevVPortTypeToString(optr->net[i].vporttype));
/* <interface> gets a <portdata> child and explicit close when
 * portdata exists, otherwise it is self-closed. */
573 if (optr->net[i].portdata) {
574 virBufferAddLit(buf, ">\n");
575 virBufferAdjustIndent(buf, 2);
576 virBufferEscapeString(buf, "<portdata>%s</portdata>\n",
577 optr->net[i].portdata);
578 virBufferAdjustIndent(buf, -2);
579 virBufferAddLit(buf, "</interface>\n");
581 virBufferAddLit(buf, "/>\n");
586 virBufferAdjustIndent(buf, -2);
587 virBufferAddLit(buf, "</network>\n");
/* Serialize a cookie into <qemu-migration> XML: guest name/UUID, local
 * host identity, one <feature> per mandatory flag, then each optional
 * section that is both flagged and populated. */
593 qemuMigrationCookieXMLFormat(virQEMUDriverPtr driver,
595 qemuMigrationCookiePtr mig)
597 char uuidstr[VIR_UUID_STRING_BUFLEN];
598 char hostuuidstr[VIR_UUID_STRING_BUFLEN];
601 virUUIDFormat(mig->uuid, uuidstr);
602 virUUIDFormat(mig->localHostuuid, hostuuidstr);
604 virBufferAddLit(buf, "<qemu-migration>\n");
605 virBufferAdjustIndent(buf, 2);
606 virBufferEscapeString(buf, "<name>%s</name>\n", mig->name);
607 virBufferAsprintf(buf, "<uuid>%s</uuid>\n", uuidstr);
608 virBufferEscapeString(buf, "<hostname>%s</hostname>\n", mig->localHostname);
609 virBufferAsprintf(buf, "<hostuuid>%s</hostuuid>\n", hostuuidstr);
/* Advertise mandatory features so the peer can reject the migration
 * if it does not understand one of them. */
611 for (i = 0; i < QEMU_MIGRATION_COOKIE_FLAG_LAST; i++) {
612 if (mig->flagsMandatory & (1 << i))
613 virBufferAsprintf(buf, "<feature name='%s'/>\n",
614 qemuMigrationCookieFlagTypeToString(i));
617 if ((mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
619 qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);
621 if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
623 virBufferAsprintf(buf, "<lockstate driver='%s'>\n",
625 virBufferAdjustIndent(buf, 2);
626 virBufferAsprintf(buf, "<leases>%s</leases>\n",
628 virBufferAdjustIndent(buf, -2);
629 virBufferAddLit(buf, "</lockstate>\n");
/* Persistent definition is embedded as a migratable inactive
 * domain XML document. */
632 if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
634 if (qemuDomainDefFormatBuf(driver,
636 VIR_DOMAIN_XML_INACTIVE |
637 VIR_DOMAIN_XML_SECURE |
638 VIR_DOMAIN_XML_MIGRATABLE,
643 if ((mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) && mig->network)
644 qemuMigrationCookieNetworkXMLFormat(buf, mig->network);
646 if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd) {
647 virBufferAddLit(buf, "<nbd");
649 virBufferAsprintf(buf, " port='%d'", mig->nbd->port);
650 virBufferAddLit(buf, "/>\n");
653 virBufferAdjustIndent(buf, -2);
654 virBufferAddLit(buf, "</qemu-migration>\n");
/* Convenience wrapper: format the cookie into a newly allocated string
 * (caller frees), returning NULL on format or buffer error. */
659 static char *qemuMigrationCookieXMLFormatStr(virQEMUDriverPtr driver,
660 qemuMigrationCookiePtr mig)
662 virBuffer buf = VIR_BUFFER_INITIALIZER;
664 if (qemuMigrationCookieXMLFormat(driver, &buf, mig) < 0) {
665 virBufferFreeAndReset(&buf);
/* Check for OOM accumulated inside the buffer before handing it out. */
669 if (virBufferError(&buf)) {
671 virBufferFreeAndReset(&buf);
675 return virBufferContentAndReset(&buf);
/* Parse the <graphics> element of an incoming cookie.  type, port and
 * listen are required; tlsPort is required for SPICE only; the cert
 * subject is optional.  Returns NULL on failure with error reported. */
679 static qemuMigrationCookieGraphicsPtr
680 qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt)
682 qemuMigrationCookieGraphicsPtr grap;
685 if (VIR_ALLOC(grap) < 0)
688 if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) {
689 virReportError(VIR_ERR_INTERNAL_ERROR,
690 "%s", _("missing type attribute in migration data"));
693 if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) {
694 virReportError(VIR_ERR_INTERNAL_ERROR,
695 _("unknown graphics type %s"), tmp);
700 if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) {
701 virReportError(VIR_ERR_INTERNAL_ERROR,
702 "%s", _("missing port attribute in migration data"));
705 if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
706 if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) {
707 virReportError(VIR_ERR_INTERNAL_ERROR,
708 "%s", _("missing tlsPort attribute in migration data"));
712 if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) {
713 virReportError(VIR_ERR_INTERNAL_ERROR,
714 "%s", _("missing listen attribute in migration data"));
/* Optional: only present when the source had TLS configured. */
718 grap->tlsSubject = virXPathString("string(./graphics/cert[@info='subject']/@value)", ctxt);
723 qemuMigrationCookieGraphicsFree(grap);
/* Parse the <network> section of an incoming cookie into a network cookie
 * struct: one entry per <interface>, each with a required vporttype and an
 * optional <portdata>.  The XPath context node is temporarily repointed at
 * each interface and restored before returning. */
728 static qemuMigrationCookieNetworkPtr
729 qemuMigrationCookieNetworkXMLParse(xmlXPathContextPtr ctxt)
731 qemuMigrationCookieNetworkPtr optr;
734 xmlNodePtr *interfaces = NULL;
736 xmlNodePtr save_ctxt = ctxt->node;
738 if (VIR_ALLOC(optr) < 0)
741 if ((n = virXPathNodeSet("./network/interface", ctxt, &interfaces)) < 0) {
742 virReportError(VIR_ERR_INTERNAL_ERROR,
743 "%s", _("missing interface information"));
748 if (VIR_ALLOC_N(optr->net, optr->nnets) < 0)
751 for (i = 0; i < n; i++) {
752 /* portdata is optional, and may not exist */
753 ctxt->node = interfaces[i];
754 optr->net[i].portdata = virXPathString("string(./portdata[1])", ctxt);
756 if (!(vporttype = virXMLPropString(interfaces[i], "vporttype"))) {
757 virReportError(VIR_ERR_INTERNAL_ERROR,
758 "%s", _("missing vporttype attribute in migration data"));
761 optr->net[i].vporttype = virNetDevVPortTypeFromString(vporttype);
764 VIR_FREE(interfaces);
767 ctxt->node = save_ctxt;
771 VIR_FREE(interfaces);
772 qemuMigrationCookieNetworkFree(optr);
/* Parse and validate an incoming cookie document against the local cookie
 * 'mig' (which already holds local identity from qemuMigrationCookieNew):
 *  - the guest name and UUID must match;
 *  - the remote hostname and host UUID must differ from the local ones
 *    (rejects localhost "migration");
 *  - every <feature> advertised as mandatory by the peer must be present
 *    in our supported 'flags';
 *  - then each optional section present in both flags and XML is parsed.
 * Returns 0/-1 (by convention of the error paths shown). */
779 qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
780 virQEMUDriverPtr driver,
782 xmlXPathContextPtr ctxt,
785 char uuidstr[VIR_UUID_STRING_BUFLEN];
787 xmlNodePtr *nodes = NULL;
790 virCapsPtr caps = NULL;
792 if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
795 /* We don't store the uuid, name, hostname, or hostuuid
796 * values. We just compare them to local data to do some
797 * sanity checking on migration operation
800 /* Extract domain name */
801 if (!(tmp = virXPathString("string(./name[1])", ctxt))) {
802 virReportError(VIR_ERR_INTERNAL_ERROR,
803 "%s", _("missing name element in migration data"));
806 if (STRNEQ(tmp, mig->name)) {
807 virReportError(VIR_ERR_INTERNAL_ERROR,
808 _("Incoming cookie data had unexpected name %s vs %s"),
814 /* Extract domain uuid */
815 tmp = virXPathString("string(./uuid[1])", ctxt);
817 virReportError(VIR_ERR_INTERNAL_ERROR,
818 "%s", _("missing uuid element in migration data"));
821 virUUIDFormat(mig->uuid, uuidstr);
822 if (STRNEQ(tmp, uuidstr)) {
823 virReportError(VIR_ERR_INTERNAL_ERROR,
824 _("Incoming cookie data had unexpected UUID %s vs %s"),
829 /* Check & forbid "localhost" migration */
830 if (!(mig->remoteHostname = virXPathString("string(./hostname[1])", ctxt))) {
831 virReportError(VIR_ERR_INTERNAL_ERROR,
832 "%s", _("missing hostname element in migration data"));
835 if (STREQ(mig->remoteHostname, mig->localHostname)) {
836 virReportError(VIR_ERR_INTERNAL_ERROR,
837 _("Attempt to migrate guest to the same host %s"),
838 mig->remoteHostname);
842 if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) {
843 virReportError(VIR_ERR_INTERNAL_ERROR,
844 "%s", _("missing hostuuid element in migration data"));
847 if (virUUIDParse(tmp, mig->remoteHostuuid) < 0) {
848 virReportError(VIR_ERR_INTERNAL_ERROR,
849 "%s", _("malformed hostuuid element in migration data"));
/* Host UUID comparison catches hosts that share a hostname. */
852 if (memcmp(mig->remoteHostuuid, mig->localHostuuid, VIR_UUID_BUFLEN) == 0) {
853 virReportError(VIR_ERR_INTERNAL_ERROR,
854 _("Attempt to migrate guest to the same host %s"),
860 /* Check to ensure all mandatory features from XML are also
861 * present in 'flags' */
862 if ((n = virXPathNodeSet("./feature", ctxt, &nodes)) < 0)
865 for (i = 0; i < n; i++) {
867 char *str = virXMLPropString(nodes[i], "name");
869 virReportError(VIR_ERR_INTERNAL_ERROR,
870 "%s", _("missing feature name"));
874 if ((val = qemuMigrationCookieFlagTypeFromString(str)) < 0) {
875 virReportError(VIR_ERR_INTERNAL_ERROR,
876 _("Unknown migration cookie feature %s"),
882 if ((flags & (1 << val)) == 0) {
883 virReportError(VIR_ERR_INTERNAL_ERROR,
884 _("Unsupported migration cookie feature %s"),
/* Optional sections: parsed only when both the caller's flags and
 * the document contain them. */
892 if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
893 virXPathBoolean("count(./graphics) > 0", ctxt) &&
894 (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
897 if ((flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
898 virXPathBoolean("count(./lockstate) > 0", ctxt)) {
899 mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
900 if (!mig->lockDriver) {
901 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
902 _("Missing lock driver name in migration cookie"));
905 mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
/* Treat an empty <leases/> the same as absent lock state. */
906 if (mig->lockState && STREQ(mig->lockState, ""))
907 VIR_FREE(mig->lockState);
910 if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
911 virXPathBoolean("count(./domain) > 0", ctxt)) {
912 if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) {
913 virReportError(VIR_ERR_INTERNAL_ERROR,
914 _("Too many domain elements in "
915 "migration cookie: %d"),
919 mig->persistent = virDomainDefParseNode(doc, nodes[0],
920 caps, driver->xmlopt,
921 -1, VIR_DOMAIN_XML_INACTIVE);
922 if (!mig->persistent) {
923 /* virDomainDefParseNode already reported
930 if ((flags & QEMU_MIGRATION_COOKIE_NETWORK) &&
931 virXPathBoolean("count(./network) > 0", ctxt) &&
932 (!(mig->network = qemuMigrationCookieNetworkXMLParse(ctxt))))
935 if (flags & QEMU_MIGRATION_COOKIE_NBD &&
936 virXPathBoolean("boolean(./nbd)", ctxt)) {
939 if (VIR_ALLOC(mig->nbd) < 0)
/* port attribute is optional; absent means port stays 0. */
942 port = virXPathString("string(./nbd/@port)", ctxt);
943 if (port && virStrToLong_i(port, NULL, 10, &mig->nbd->port) < 0) {
944 virReportError(VIR_ERR_INTERNAL_ERROR,
945 _("Malformed nbd port '%s'"),
953 virObjectUnref(caps);
959 virObjectUnref(caps);
/* Parse a cookie from its string form: build an XML doc + XPath context
 * and delegate to qemuMigrationCookieXMLParse. */
965 qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig,
966 virQEMUDriverPtr driver,
970 xmlDocPtr doc = NULL;
971 xmlXPathContextPtr ctxt = NULL;
974 VIR_DEBUG("xml=%s", NULLSTR(xml));
976 if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt)))
979 ret = qemuMigrationCookieXMLParse(mig, driver, doc, ctxt, flags);
982 xmlXPathFreeContext(ctxt);
/* Populate the cookie sections selected by 'flags' from the domain, then
 * serialize into *cookieout / *cookieoutlen (length includes the trailing
 * NUL).  A no-op when the caller did not ask for a cookie. */
990 qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
991 virQEMUDriverPtr driver,
997 if (!cookieout || !cookieoutlen)
1002 if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS &&
1003 qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
1006 if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
1007 qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
1010 if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT &&
1011 qemuMigrationCookieAddPersistent(mig, dom) < 0)
1014 if (flags & QEMU_MIGRATION_COOKIE_NETWORK &&
1015 qemuMigrationCookieAddNetwork(mig, driver, dom) < 0) {
1019 if ((flags & QEMU_MIGRATION_COOKIE_NBD) &&
1020 qemuMigrationCookieAddNBD(mig, driver, dom) < 0)
1023 if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
/* +1: the cookie is transported as a NUL-terminated string. */
1026 *cookieoutlen = strlen(*cookieout) + 1;
1028 VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout);
/* Create a cookie for this side of the migration and, when an incoming
 * cookie string is supplied, parse and validate it.  Rejects cookies that
 * are not NUL-terminated, and cross-checks the lock manager: the peer's
 * lock driver must match ours, and lock state must be present whenever our
 * lock manager plugin tracks state.  Returns the cookie or NULL. */
1034 static qemuMigrationCookiePtr
1035 qemuMigrationEatCookie(virQEMUDriverPtr driver,
1036 virDomainObjPtr dom,
1037 const char *cookiein,
1041 qemuMigrationCookiePtr mig = NULL;
1043 /* Parse & validate incoming cookie (if any) */
1044 if (cookiein && cookieinlen &&
1045 cookiein[cookieinlen-1] != '\0') {
1046 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
1047 _("Migration cookie was not NULL terminated"));
1051 VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein));
1053 if (!(mig = qemuMigrationCookieNew(dom)))
1056 if (cookiein && cookieinlen &&
1057 qemuMigrationCookieXMLParseStr(mig,
1063 if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
1064 if (!mig->lockDriver) {
/* Peer sent lockstate but no driver name: only fatal when our
 * own lock plugin actually uses state. */
1065 if (virLockManagerPluginUsesState(driver->lockManager)) {
1066 virReportError(VIR_ERR_INTERNAL_ERROR,
1067 _("Missing %s lock state for migration cookie"),
1068 virLockManagerPluginGetName(driver->lockManager));
1071 } else if (STRNEQ(mig->lockDriver,
1072 virLockManagerPluginGetName(driver->lockManager))) {
1073 virReportError(VIR_ERR_INTERNAL_ERROR,
1074 _("Source host lock driver %s different from target %s"),
1076 virLockManagerPluginGetName(driver->lockManager));
1084 qemuMigrationCookieFree(mig);
/* Remember the domain's run state before migration starts so a failed
 * migration can restore it (see qemuMigrationRestoreDomainState). */
1089 qemuMigrationStoreDomainState(virDomainObjPtr vm)
1091 qemuDomainObjPrivatePtr priv = vm->privateData;
1092 priv->preMigrationState = virDomainObjGetState(vm, NULL);
1094 VIR_DEBUG("Storing pre-migration state=%d domain=%p",
1095 priv->preMigrationState, vm);
1098 /* Returns true if the domain was resumed, false otherwise */
1100 qemuMigrationRestoreDomainState(virConnectPtr conn, virDomainObjPtr vm)
1102 virQEMUDriverPtr driver = conn->privateData;
1103 qemuDomainObjPrivatePtr priv = vm->privateData;
1104 int state = virDomainObjGetState(vm, NULL);
1107 VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%d, state=%d",
1108 driver, vm, priv->preMigrationState, state);
/* Only the RUNNING->PAUSED transition is undone: anything else could
 * fight with state changes made for other reasons. */
1110 if (state == VIR_DOMAIN_PAUSED &&
1111 priv->preMigrationState == VIR_DOMAIN_RUNNING) {
1112 /* This is basically the only restore possibility that's safe
1113 * and we should attempt to do */
1115 VIR_DEBUG("Restoring pre-migration state due to migration error");
1117 /* we got here through some sort of failure; start the domain again */
1118 if (qemuProcessStartCPUs(driver, vm, conn,
1119 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
1120 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
1121 /* Hm, we already know we are in error here. We don't want to
1122 * overwrite the previous error, though, so we just throw something
1123 * to the logs and hope for the best */
1124 VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
/* Reset the stored state so a later call is a no-op. */
1131 priv->preMigrationState = VIR_DOMAIN_NOSTATE;
1136 * qemuMigrationStartNBDServer:
1137 * @driver: qemu driver
1140 * Starts NBD server. This is a newer method to copy
1141 * storage during migration than using 'blk' and 'inc'
1142 * arguments in 'migrate' monitor command.
1143 * Error is reported here.
1145 * Returns 0 on success, -1 otherwise.
1148 qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
1150 const char *listenAddr)
1153 qemuDomainObjPrivatePtr priv = vm->privateData;
1154 unsigned short port = 0;
1155 char *diskAlias = NULL;
1158 for (i = 0; i < vm->def->ndisks; i++) {
1159 virDomainDiskDefPtr disk = vm->def->disks[i];
1161 /* skip shared, RO and source-less disks */
1162 if (disk->shared || disk->readonly || !virDomainDiskGetSource(disk))
1165 VIR_FREE(diskAlias);
1166 if (virAsprintf(&diskAlias, "%s%s",
1167 QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
1170 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1171 QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
/* Server is started lazily on the first exported disk; port is
 * taken from the dedicated migration port range. */
1175 ((virPortAllocatorAcquire(driver->migrationPorts, &port) < 0) ||
1176 (qemuMonitorNBDServerStart(priv->mon, listenAddr, port) < 0))) {
1177 qemuDomainObjExitMonitor(driver, vm);
1181 if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, true) < 0) {
1182 qemuDomainObjExitMonitor(driver, vm);
1185 qemuDomainObjExitMonitor(driver, vm);
1188 priv->nbdPort = port;
1192 VIR_FREE(diskAlias);
/* NOTE(review): port was acquired from driver->migrationPorts above
 * but is released here to driver->remotePorts — looks like a port
 * allocator mismatch; confirm and fix to release the same pool. */
1194 virPortAllocatorRelease(driver->remotePorts, port);
1199 * qemuMigrationDriveMirror:
1200 * @driver: qemu driver
1202 * @mig: migration cookie
1203 * @host: where are we migrating to
1204 * @speed: how much should the copying be limited
1205 * @migrate_flags: migrate monitor command flags
1207 * Run drive-mirror to feed NBD server running on dst and wait
1208 * till the process switches into another phase where writes go
1209 * simultaneously to both source and destination. And this switch
1210 * is what we are waiting for before proceeding with the next
1211 * disk. On success, update @migrate_flags so we don't tell
1212 * 'migrate' command to do the very same operation.
1214 * Returns 0 on success (@migrate_flags updated),
1218 qemuMigrationDriveMirror(virQEMUDriverPtr driver,
1220 qemuMigrationCookiePtr mig,
1222 unsigned long speed,
1223 unsigned int *migrate_flags)
1225 qemuDomainObjPrivatePtr priv = vm->privateData;
1229 size_t i, lastGood = 0;
1230 char *diskAlias = NULL;
1231 char *nbd_dest = NULL;
1232 char *hoststr = NULL;
1233 unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
1234 virErrorPtr err = NULL;
/* Nothing to do unless the caller requested full or incremental
 * non-shared storage copy. */
1236 if (!(*migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
1237 QEMU_MONITOR_MIGRATE_NON_SHARED_INC)))
1241 /* Destination doesn't support NBD server.
1242 * Fall back to previous implementation. */
1243 VIR_DEBUG("Destination doesn't support NBD server "
1244 "Falling back to previous implementation.");
1248 /* steal NBD port and thus prevent its propagation back to destination */
1249 port = mig->nbd->port;
1252 /* escape literal IPv6 address */
1253 if (strchr(host, ':')) {
1254 if (virAsprintf(&hoststr, "[%s]", host) < 0)
1256 } else if (VIR_STRDUP(hoststr, host) < 0) {
1260 if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC)
1261 mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;
1263 for (i = 0; i < vm->def->ndisks; i++) {
1264 virDomainDiskDefPtr disk = vm->def->disks[i];
1265 virDomainBlockJobInfo info;
1267 /* skip shared, RO and source-less disks */
1268 if (disk->shared || disk->readonly || !virDomainDiskGetSource(disk))
1271 VIR_FREE(diskAlias);
/* nbd_dest names the export on the destination's NBD server;
 * export name matches the drive alias added by StartNBDServer. */
1273 if ((virAsprintf(&diskAlias, "%s%s",
1274 QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0) ||
1275 (virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
1276 hoststr, port, diskAlias) < 0))
1279 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1280 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
1282 mon_ret = qemuMonitorDriveMirror(priv->mon, diskAlias, nbd_dest,
1283 NULL, speed, mirror_flags);
1284 qemuDomainObjExitMonitor(driver, vm);
1291 /* wait for completion */
1293 /* Poll every 500ms for progress & to allow cancellation */
1294 struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000ull };
1296 memset(&info, 0, sizeof(info));
1298 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1299 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
1301 if (priv->job.asyncAbort) {
1302 /* explicitly do this *after* we entered the monitor,
1303 * as this is a critical section so we are guaranteed
1304 * priv->job.asyncAbort will not change */
1305 qemuDomainObjExitMonitor(driver, vm);
1306 virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
1307 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
1308 _("canceled by client"));
1311 mon_ret = qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
1312 &info, BLOCK_JOB_INFO, true);
1313 qemuDomainObjExitMonitor(driver, vm);
/* cur == end means the mirror reached steady state: writes now go
 * to both source and destination. */
1318 if (info.cur == info.end) {
1319 VIR_DEBUG("Drive mirroring of '%s' completed", diskAlias);
1323 /* XXX Frankly speaking, we should listen to the events,
1324 * instead of doing this. But this works for now and we
1325 * are doing something similar in migration itself anyway */
1327 virObjectUnlock(vm);
1329 nanosleep(&ts, NULL);
1335 /* Okay, copied. Modify migrate_flags */
1336 *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
1337 QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
1341 VIR_FREE(diskAlias);
1347 /* don't overwrite any errors */
1348 err = virSaveLastError();
1349 /* cancel any outstanding jobs */
/* Walk back over the disks that already had a mirror started
 * (lastGood) and abort each block job. */
1351 virDomainDiskDefPtr disk = vm->def->disks[--lastGood];
1353 /* skip shared, RO disks */
1354 if (disk->shared || disk->readonly || !virDomainDiskGetSource(disk))
1357 VIR_FREE(diskAlias);
1358 if (virAsprintf(&diskAlias, "%s%s",
1359 QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
1361 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1362 QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
1363 if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
1364 NULL, BLOCK_JOB_ABORT, true) < 0) {
1365 VIR_WARN("Unable to cancel block-job on '%s'", diskAlias);
1367 qemuDomainObjExitMonitor(driver, vm);
1369 VIR_WARN("Unable to enter monitor. No block job cancelled");
/* Stop the destination-side NBD server (best-effort: failure only warns)
 * and release the port stored in priv->nbdPort.
 * NOTE(review): the port is released to driver->remotePorts, while
 * qemuMigrationStartNBDServer acquires it from driver->migrationPorts —
 * confirm this mismatch and release to the same allocator. */
1380 qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
1382 qemuMigrationCookiePtr mig)
1384 qemuDomainObjPrivatePtr priv = vm->privateData;
1389 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1390 QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
1393 if (qemuMonitorNBDServerStop(priv->mon) < 0)
1394 VIR_WARN("Unable to stop NBD server");
1396 qemuDomainObjExitMonitor(driver, vm);
1398 virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
/* Abort the drive-mirror block job on every migratable disk (best-effort;
 * failures only warn).  Used to tear down storage mirroring when the
 * migration is cancelled or fails after qemuMigrationDriveMirror. */
1403 qemuMigrationCancelDriveMirror(qemuMigrationCookiePtr mig,
1404 virQEMUDriverPtr driver,
1407 qemuDomainObjPrivatePtr priv = vm->privateData;
1409 char *diskAlias = NULL;
1411 VIR_DEBUG("mig=%p nbdPort=%d", mig->nbd, priv->nbdPort);
1413 for (i = 0; i < vm->def->ndisks; i++) {
1414 virDomainDiskDefPtr disk = vm->def->disks[i];
1416 /* skip shared, RO and source-less disks */
1417 if (disk->shared || disk->readonly || !virDomainDiskGetSource(disk))
1420 VIR_FREE(diskAlias);
1421 if (virAsprintf(&diskAlias, "%s%s",
1422 QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
1425 if (qemuDomainObjEnterMonitorAsync(driver, vm,
1426 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
1429 if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
1430 NULL, BLOCK_JOB_ABORT, true) < 0)
1431 VIR_WARN("Unable to stop block job on %s", diskAlias);
1432 qemuDomainObjExitMonitor(driver, vm);
1436 VIR_FREE(diskAlias);
1440 /* Validate whether the domain is safe to migrate. If vm is NULL,
1441 * then this is being run in the v2 Prepare stage on the destination
1442 * (where we only have the target xml); if vm is provided, then this
1443 * is being run in either v2 Perform or v3 Begin (where we also have
1444 * access to all of the domain's metadata, such as whether it is
1445 * marked autodestroy or has snapshots). While it would be nice to
1446 * assume that checking on source is sufficient to prevent ever
1447 * talking to the destination in the first place, we are stuck with
1448 * the fact that older servers did not do checks on the source. */
/* Reject migration for configurations libvirt cannot migrate: domains
 * marked for auto-destroy, domains with snapshots (remote only), domains
 * paused due to I/O error (when abort_on_error is set), active block
 * jobs, and non-USB assigned host devices.
 * NOTE(review): elided excerpt — the vm-vs-def branch structure, the
 * `remote` checks' enclosing condition, and returns are not all visible. */
1450 qemuMigrationIsAllowed(virQEMUDriverPtr driver, virDomainObjPtr vm,
1451                        virDomainDefPtr def, bool remote, bool abort_on_error)
1459         if (qemuProcessAutoDestroyActive(driver, vm)) {
1460             virReportError(VIR_ERR_OPERATION_INVALID,
1461                            "%s", _("domain is marked for auto destroy"));
1465         /* perform these checks only when migrating to remote hosts */
1467             nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
1471             if (nsnapshots > 0) {
1472                 virReportError(VIR_ERR_OPERATION_INVALID,
1473                                _("cannot migrate domain with %d snapshots"),
1478         /* cancel migration if disk I/O error is emitted while migrating */
1479         if (abort_on_error &&
1480             virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
1481             pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
1482             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
1483                            _("cannot migrate domain with I/O error"));
         /* An in-flight block job would race with migration's disk copy */
1489         if (virDomainHasDiskMirror(vm)) {
1490             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
1491                            _("domain has an active block job"));
1498     /* Migration with USB host devices is allowed, all other devices are
1502     for (i = 0; i < def->nhostdevs; i++) {
1503         virDomainHostdevDefPtr hostdev = def->hostdevs[i];
1504         if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
1505             hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
1511             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
1512                            _("domain has assigned non-USB host devices"));
/* Check every disk for cache modes that can corrupt data across
 * migration: disks are safe if readonly/shared-with-cache=none, on a
 * shared or cluster filesystem, or network disks using RBD; otherwise
 * report VIR_ERR_MIGRATE_UNSAFE.
 * NOTE(review): elided excerpt — returns and some branch bodies between
 * the visible lines are missing. */
1520 qemuMigrationIsSafe(virDomainDefPtr def)
1524     for (i = 0; i < def->ndisks; i++) {
1525         virDomainDiskDefPtr disk = def->disks[i];
1526         const char *src = virDomainDiskGetSource(disk);
1528         /* Our code elsewhere guarantees shared disks are either readonly (in
1529          * which case cache mode doesn't matter) or used with cache=none */
1533             disk->cachemode != VIR_DOMAIN_DISK_CACHE_DISABLE) {
             /* File-backed disks: safe only on shared/cluster filesystems */
1536             if (virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE) {
1537                 if ((rc = virFileIsSharedFS(src)) < 0)
1541                 if ((rc = virStorageFileIsClusterFS(src)) < 0)
             /* RBD network disks are treated as migration-safe */
1545             } else if (disk->src.type == VIR_STORAGE_TYPE_NETWORK &&
1546                        disk->src.protocol == VIR_STORAGE_NET_PROTOCOL_RBD) {
1550             virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
1551                            _("Migration may lead to data corruption if disks"
1552                              " use cache != none"));
1560 /** qemuMigrationSetOffline
1561  * Pause domain for non-live migration.
 *
 * Stops the vCPUs with reason VIR_DOMAIN_PAUSED_MIGRATION inside the
 * outgoing-migration async job and, on success, queues a SUSPENDED
 * lifecycle event.  NOTE(review): elided excerpt — the `if (ret == 0)`
 * guard around the event block and the return are not visible here.
 */
1564 qemuMigrationSetOffline(virQEMUDriverPtr driver,
1568     VIR_DEBUG("driver=%p vm=%p", driver, vm);
1569     ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
1570                               QEMU_ASYNC_JOB_MIGRATION_OUT);
1572         virObjectEventPtr event;
         /* Tell listeners the guest was suspended because of migration */
1574         event = virDomainEventLifecycleNewFromObj(vm,
1575                                          VIR_DOMAIN_EVENT_SUSPENDED,
1576                                          VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
1578         qemuDomainEventQueue(driver, event);
/* Enable XBZRLE compressed migration on the QEMU monitor for the given
 * async job.  Queries the capability first and reports a side-specific
 * error (source vs. target binary) when QEMU lacks XBZRLE support.
 * NOTE(review): elided excerpt — the error/cleanup paths between the
 * visible lines are missing. */
1586 qemuMigrationSetCompression(virQEMUDriverPtr driver,
1588                             enum qemuDomainAsyncJob job)
1590     qemuDomainObjPrivatePtr priv = vm->privateData;
1593     if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
     /* First ask whether this QEMU even advertises XBZRLE */
1596     ret = qemuMonitorGetMigrationCapability(
1598                 QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);
1602     } else if (ret == 0) {
         /* ret == 0: capability absent — blame the right side of the
          * migration in the error message */
1603         if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
1604             virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
1605                            _("Compressed migration is not supported by "
1606                              "target QEMU binary"));
1608             virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
1609                            _("Compressed migration is not supported by "
1610                              "source QEMU binary"));
     /* Capability present: actually switch it on */
1616     ret = qemuMonitorSetMigrationCapability(
1618                 QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);
1621     qemuDomainObjExitMonitor(driver, vm);
/* Enable the auto-converge migration capability (QEMU throttles the
 * guest so migration can converge).  Mirrors the query-then-set pattern
 * of qemuMigrationSetCompression above.
 * NOTE(review): elided excerpt — error/cleanup paths are missing. */
1626 qemuMigrationSetAutoConverge(virQEMUDriverPtr driver,
1628                              enum qemuDomainAsyncJob job)
1630     qemuDomainObjPrivatePtr priv = vm->privateData;
1633     if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
1636     ret = qemuMonitorGetMigrationCapability(
1638                 QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE);
1642     } else if (ret == 0) {
         /* Capability not advertised by this QEMU binary */
1643         virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
1644                        _("Auto-Converge is not supported by "
1650     ret = qemuMonitorSetMigrationCapability(
1652                 QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE);
1655     qemuDomainObjExitMonitor(driver, vm);
/* For guests with a SPICE display and a QEMU that supports seamless
 * SPICE migration, poll the monitor until the SPICE server reports its
 * client data has been transferred to the destination.
 * NOTE(review): elided excerpt — loop `break`s, returns and the relock
 * after nanosleep() are not visible. */
1661 qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
1664     qemuDomainObjPrivatePtr priv = vm->privateData;
1665     bool wait_for_spice = false;
1666     bool spice_migrated = false;
     /* Only wait when QEMU supports seamless migration AND the guest
      * actually has a SPICE graphics device */
1669     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SEAMLESS_MIGRATION)) {
1670         for (i = 0; i < vm->def->ngraphics; i++) {
1671             if (vm->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
1672                 wait_for_spice = true;
1678     if (!wait_for_spice)
1681     while (!spice_migrated) {
1682         /* Poll every 50ms for progress & to allow cancellation */
1683         struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
1685         if (qemuDomainObjEnterMonitorAsync(driver, vm,
1686                                            QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
1689         if (qemuMonitorGetSpiceMigrationStatus(priv->mon,
1690                                                &spice_migrated) < 0) {
1691             qemuDomainObjExitMonitor(driver, vm);
1694         qemuDomainObjExitMonitor(driver, vm);
         /* Drop the VM lock while sleeping so other threads can progress */
1695         virObjectUnlock(vm);
1696         nanosleep(&ts, NULL);
/* Fetch current migration statistics from the QEMU monitor and fold
 * them into priv->job.info, translating QEMU's migration status into a
 * virDomainJobType.  The `job` string names the operation for error
 * messages ("migration job", "domain save job", ...).
 * NOTE(review): elided excerpt — returns/breaks after each case and the
 * -2 "entering monitor timed out" path are not visible. */
1704 qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
1707                              enum qemuDomainAsyncJob asyncJob)
1709     qemuDomainObjPrivatePtr priv = vm->privateData;
1711     qemuMonitorMigrationStatus status;
1713     memset(&status, 0, sizeof(status));
1715     ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
1717         /* Guest already exited or waiting for the job timed out; nothing
1718          * further to update. */
1721     ret = qemuMonitorGetMigrationStatus(priv->mon, &status);
1723     qemuDomainObjExitMonitor(driver, vm);
     /* Publish raw monitor status before deriving the job-info fields */
1725     priv->job.status = status;
1727     if (ret < 0 || virTimeMillisNow(&priv->job.info.timeElapsed) < 0)
1730     priv->job.info.timeElapsed -= priv->job.start;
1733     switch (priv->job.status.status) {
1734     case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
1735         priv->job.info.type = VIR_DOMAIN_JOB_NONE;
1736         virReportError(VIR_ERR_OPERATION_FAILED,
1737                        _("%s: %s"), job, _("is not active"));
1740     case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
1744     case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
         /* file* = disk progress, mem* = RAM progress, data* = combined */
1745         priv->job.info.fileTotal = priv->job.status.disk_total;
1746         priv->job.info.fileRemaining = priv->job.status.disk_remaining;
1747         priv->job.info.fileProcessed = priv->job.status.disk_transferred;
1749         priv->job.info.memTotal = priv->job.status.ram_total;
1750         priv->job.info.memRemaining = priv->job.status.ram_remaining;
1751         priv->job.info.memProcessed = priv->job.status.ram_transferred;
1753         priv->job.info.dataTotal =
1754             priv->job.status.ram_total + priv->job.status.disk_total;
1755         priv->job.info.dataRemaining =
1756             priv->job.status.ram_remaining + priv->job.status.disk_remaining;
1757         priv->job.info.dataProcessed =
1758             priv->job.status.ram_transferred +
1759             priv->job.status.disk_transferred;
1764     case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
1765         priv->job.info.type = VIR_DOMAIN_JOB_COMPLETED;
1769     case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
1770         priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
1771         virReportError(VIR_ERR_OPERATION_FAILED,
1772                        _("%s: %s"), job, _("unexpectedly failed"));
1775     case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
1776         priv->job.info.type = VIR_DOMAIN_JOB_CANCELLED;
1777         virReportError(VIR_ERR_OPERATION_ABORTED,
1778                        _("%s: %s"), job, _("canceled by client"));
1786 /* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
1787  * QEMU reports failed migration.
 *
 * Polls qemuMigrationUpdateJobStatus() every 50ms until the job leaves
 * VIR_DOMAIN_JOB_UNBOUNDED, aborting on guest I/O-error pause (when
 * abort_on_error) or loss of the destination connection.
 * NOTE(review): elided excerpt — `break`s, the relock after nanosleep()
 * and the final return are not visible.
 */
1790 qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, virDomainObjPtr vm,
1791                                enum qemuDomainAsyncJob asyncJob,
1792                                virConnectPtr dconn, bool abort_on_error)
1794     qemuDomainObjPrivatePtr priv = vm->privateData;
     /* Pick a human-readable job name for error messages */
1798     switch (priv->job.asyncJob) {
1799     case QEMU_ASYNC_JOB_MIGRATION_OUT:
1800         job = _("migration job");
1802     case QEMU_ASYNC_JOB_SAVE:
1803         job = _("domain save job");
1805     case QEMU_ASYNC_JOB_DUMP:
1806         job = _("domain core dump job");
1812     priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
1814     while (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
1815         /* Poll every 50ms for progress & to allow cancellation */
1816         struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
1818         if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) == -1)
1821         /* cancel migration if disk I/O error is emitted while migrating */
1822         if (abort_on_error &&
1823             virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
1824             pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
1825             virReportError(VIR_ERR_OPERATION_FAILED,
1826                            _("%s: %s"), job, _("failed due to I/O error"));
         /* If the caller's connection to the destination died, give up */
1830         if (dconn && virConnectIsAlive(dconn) <= 0) {
1831             virReportError(VIR_ERR_OPERATION_FAILED, "%s",
1832                            _("Lost connection to destination host"));
1836         virObjectUnlock(vm);
1838         nanosleep(&ts, NULL);
1843     if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED) {
1845     } else if (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
1846         /* The migration was aborted by us rather than QEMU itself so let's
1847          * update the job type and notify the caller to send migrate_cancel.
1849         priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
/* Point the guest's SPICE client at the destination host: resolve the
 * target address/ports either from the migration cookie or from an
 * explicit graphicsuri (spice://host?tlsPort=...&tlsSubject=...), then
 * issue client_migrate_info on the monitor.  VNC is skipped silently.
 * NOTE(review): elided excerpt — initializations of type/port/tlsPort,
 * gotos and the cleanup label are not visible. */
1858 qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
1860                                   qemuMigrationCookiePtr cookie,
1861                                   const char *graphicsuri)
1863     qemuDomainObjPrivatePtr priv = vm->privateData;
1865     const char *listenAddress = NULL;
1867     virURIPtr uri = NULL;
1871     const char *tlsSubject = NULL;
     /* Nothing to relocate without cookie graphics data or an explicit URI */
1873     if (!cookie || (!cookie->graphics && !graphicsuri))
1876     if (graphicsuri && !(uri = virURIParse(graphicsuri)))
     /* Preferred source of truth: graphics info baked into the cookie */
1879     if (cookie->graphics) {
1880         type = cookie->graphics->type;
1882         listenAddress = cookie->graphics->listen;
         /* A wildcard/absent listen address is useless to the client;
          * fall back to the destination's hostname from the cookie */
1884         if (!listenAddress ||
1885             (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
1886              virSocketAddrIsWildcard(&addr)))
1887             listenAddress = cookie->remoteHostname;
1889         port = cookie->graphics->port;
1890         tlsPort = cookie->graphics->tlsPort;
1891         tlsSubject = cookie->graphics->tlsSubject;
     /* An explicit graphicsuri overrides cookie data */
1897         if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
1898             virReportError(VIR_ERR_INVALID_ARG,
1899                            _("unknown graphics type %s"), uri->scheme);
1904             listenAddress = uri->server;
         /* Optional query parameters: tlsPort=<n>, tlsSubject=<dn> */
1908         for (i = 0; i < uri->paramsCount; i++) {
1909             virURIParamPtr param = uri->params + i;
1911             if (STRCASEEQ(param->name, "tlsPort")) {
1912                 if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
1913                     virReportError(VIR_ERR_INVALID_ARG,
1914                                    _("invalid tlsPort number: %s"),
1918             } else if (STRCASEEQ(param->name, "tlsSubject")) {
1919                 tlsSubject = param->value;
1924     /* QEMU doesn't support VNC relocation yet, so
1925      * skip it to avoid generating an error
1927     if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
1932     if (qemuDomainObjEnterMonitorAsync(driver, vm,
1933                                        QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
1934         ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
1935                                           port, tlsPort, tlsSubject);
1936         qemuDomainObjExitMonitor(driver, vm);
/* On the destination, re-apply Open vSwitch per-port data carried in the
 * migration cookie to the corresponding guest interfaces.  Other vport
 * profile types need no action.
 * NOTE(review): elided excerpt — the guard checking cookie->network and
 * the loop's breaks/returns are not visible. */
1946 qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
1948                              qemuMigrationCookiePtr cookie)
1950     virDomainNetDefPtr netptr;
     /* Cookie nets are matched to vm->def->nets by index — assumes both
      * sides enumerate interfaces in the same order (TODO confirm) */
1954     for (i = 0; i < cookie->network->nnets; i++) {
1955         netptr = vm->def->nets[i];
1957         switch (cookie->network->net[i].vporttype) {
1958         case VIR_NETDEV_VPORT_PROFILE_NONE:
1959         case VIR_NETDEV_VPORT_PROFILE_8021QBG:
1960         case VIR_NETDEV_VPORT_PROFILE_8021QBH:
1962         case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
1963             if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
1964                                                    netptr->ifname) != 0) {
                 /* NOTE(review): virReportSystemError conventionally takes an
                  * errno value, but a virErrorNumber is passed here — looks
                  * like this should be virReportError; verify upstream */
1965                 virReportSystemError(VIR_ERR_INTERNAL_ERROR,
1966                                      _("Unable to run command to set OVS port data for "
1967                                        "interface %s"), netptr->ifname);
1982 /* This is called for outgoing non-p2p migrations when a connection to the
1983  * client which initiated the migration was closed but we were waiting for it
1984  * to follow up with the next phase, that is, in between
1985  * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
1986  * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 *
 * Close-callback handler: decides per migration phase whether to simply
 * discard the async job or to also warn about an unknown outcome.
 * NOTE(review): elided excerpt — `break`s, the PERFORM3 stop/cleanup
 * path and the final return are not visible.
 */
1988 static virDomainObjPtr
1989 qemuMigrationCleanup(virDomainObjPtr vm,
1993     virQEMUDriverPtr driver = opaque;
1994     qemuDomainObjPrivatePtr priv = vm->privateData;
1996     VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
1997               vm->def->name, conn,
1998               qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
1999               qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
     /* Only react if an outgoing migration job is actually active */
2002     if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
2005     VIR_DEBUG("The connection which started outgoing migration of domain %s"
2006               " was closed; canceling the migration",
2009     switch ((enum qemuMigrationJobPhase) priv->job.phase) {
2010     case QEMU_MIGRATION_PHASE_BEGIN3:
2011         /* just forget we were about to migrate */
2012         qemuDomainObjDiscardAsyncJob(driver, vm);
2015     case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
2016         VIR_WARN("Migration of domain %s finished but we don't know if the"
2017                  " domain was successfully started on destination or not",
2019         /* clear the job and let higher levels decide what to do */
2020         qemuDomainObjDiscardAsyncJob(driver, vm);
2023     case QEMU_MIGRATION_PHASE_PERFORM3:
2024         /* cannot be seen without an active migration API; unreachable */
2025     case QEMU_MIGRATION_PHASE_CONFIRM3:
2026     case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
2027         /* all done; unreachable */
2028     case QEMU_MIGRATION_PHASE_PREPARE:
2029     case QEMU_MIGRATION_PHASE_FINISH2:
2030     case QEMU_MIGRATION_PHASE_FINISH3:
2031         /* incoming migration; unreachable */
2032     case QEMU_MIGRATION_PHASE_PERFORM2:
2033         /* single phase outgoing migration; unreachable */
2034     case QEMU_MIGRATION_PHASE_NONE:
2035     case QEMU_MIGRATION_PHASE_LAST:
2045 /* The caller is supposed to lock the vm and start a migration job. */
 /* Begin phase of migration on the source: validate the domain can be
  * migrated, bake the outgoing cookie, then format and return the
  * migratable domain XML (possibly based on caller-provided xmlin).
  * Returns newly allocated XML; NULL on failure.
  * NOTE(review): elided excerpt — gotos to cleanup, some branch
  * structure and the return of `rv` are not visible. */
2047 *qemuMigrationBeginPhase(virQEMUDriverPtr driver,
2053                          unsigned long flags)
2056     qemuMigrationCookiePtr mig = NULL;
2057     virDomainDefPtr def = NULL;
2058     qemuDomainObjPrivatePtr priv = vm->privateData;
2059     virCapsPtr caps = NULL;
2060     unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;
2061     bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
2063     VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
2064               " cookieout=%p, cookieoutlen=%p, flags=%lx",
2065               driver, vm, NULLSTR(xmlin), NULLSTR(dname),
2066               cookieout, cookieoutlen, flags);
2068     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
2071     /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
2072      * Otherwise we will start the async job later in the perform phase losing
2073      * change protection.
2075     if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
2076         qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);
2078     if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
     /* Unsafe disk cache modes are only allowed with VIR_MIGRATE_UNSAFE */
2081     if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
     /* Request NBD storage migration in the cookie when non-shared
      * storage was asked for and QEMU can drive-mirror */
2084     if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
2085         virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DRIVE_MIRROR)) {
2086         /* TODO support NBD for TUNNELLED migration */
2087         if (flags & VIR_MIGRATE_TUNNELLED) {
2088             VIR_WARN("NBD in tunnelled migration is currently not supported");
2090             cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
2095     if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
2098     if (qemuMigrationBakeCookie(mig, driver, vm,
2099                                 cookieout, cookieoutlen,
     /* Offline migration is incompatible with storage copy and tunnelling
      * and requires --persistent on the destination */
2103     if (flags & VIR_MIGRATE_OFFLINE) {
2104         if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
2105                      VIR_MIGRATE_NON_SHARED_INC)) {
2106             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2107                            _("offline migration cannot handle "
2108                              "non-shared storage"));
2111         if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
2112             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2113                            _("offline migration must be specified with "
2114                              "the persistent flag set"));
2117         if (flags & VIR_MIGRATE_TUNNELLED) {
2118             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2119                            _("tunnelled offline migration does not "
     /* Caller-supplied XML must stay ABI-compatible with the running def */
2126         if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt,
2127                                             QEMU_EXPECTED_VIRT_TYPES,
2128                                             VIR_DOMAIN_XML_INACTIVE)))
2131         if (!qemuDomainDefCheckABIStability(driver, vm->def, def))
2134         rv = qemuDomainDefFormatLive(driver, def, false, true);
2136         rv = qemuDomainDefFormatLive(driver, vm->def, false, true);
2140     qemuMigrationCookieFree(mig);
2141     virObjectUnref(caps);
2142     virDomainDefFree(def);
/* Public entry point for the Begin phase: takes either a full async
 * migration job (with VIR_MIGRATE_CHANGE_PROTECTION) or a plain modify
 * job, runs qemuMigrationBeginPhase(), and on success registers the
 * close callback that cancels migration if the client connection dies.
 * NOTE(review): elided excerpt — gotos, labels and returns between the
 * visible lines are missing. */
2147 qemuMigrationBegin(virConnectPtr conn,
2153                    unsigned long flags)
2155     virQEMUDriverPtr driver = conn->privateData;
2157     enum qemuDomainAsyncJob asyncJob;
     /* Change protection keeps the migration job alive across API calls */
2159     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
2160         if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
2162         asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
2164         if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
2166         asyncJob = QEMU_ASYNC_JOB_NONE;
     /* Remember run/paused state so it can be restored on failure */
2169     qemuMigrationStoreDomainState(vm);
2171     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
2172         virReportError(VIR_ERR_OPERATION_INVALID,
2173                        "%s", _("domain is not running"));
2177     /* Check if there is any ejected media.
2178      * We don't want to require them on the destination.
2180     if (!(flags & VIR_MIGRATE_OFFLINE) &&
2181         qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
2184     if (!(xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
2185                                        cookieout, cookieoutlen,
2189     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
2190         /* We keep the job active across API calls until the confirm() call.
2191          * This prevents any other APIs being invoked while migration is taking
2194         if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
2195                                  qemuMigrationCleanup) < 0)
2197         if (qemuMigrationJobContinue(vm) == 0) {
2199             virReportError(VIR_ERR_OPERATION_FAILED,
2200                            "%s", _("domain disappeared"));
2203             VIR_FREE(*cookieout);
2211         virObjectUnlock(vm);
     /* Error path: finish/end whichever job type was started above */
2215     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
2216         if (qemuMigrationJobFinish(driver, vm) == 0)
2219         if (!qemuDomainObjEndJob(driver, vm))
2226 /* Prepare is the first step, and it runs on the destination host.
 *
 * Connection-close cleanup for the destination side: release the
 * reserved migration port and, if the incoming migration job is still
 * active, discard it.
 */
2230 qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
2233     qemuDomainObjPrivatePtr priv = vm->privateData;
2235     VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
2238               qemuDomainJobTypeToString(priv->job.active),
2239               qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
     /* Port was reserved in PrepareDirect; give it back unconditionally */
2241     virPortAllocatorRelease(driver->migrationPorts, priv->migrationPort);
2242     priv->migrationPort = 0;
2244     if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
2246     qemuDomainObjDiscardAsyncJob(driver, vm);
/* Shared Prepare-phase implementation on the destination (used by both
 * tunnelled and direct migration): optionally filter the incoming XML
 * through the migration hook, compute the -incoming argument, add the
 * domain, eat the incoming cookie, start QEMU paused, optionally start
 * an NBD server for storage migration, and bake the reply cookie.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): this is a heavily elided excerpt — gotos, cleanup
 * labels, several conditionals and returns are missing between the
 * visible lines; read against the full file before changing anything. */
2250 qemuMigrationPrepareAny(virQEMUDriverPtr driver,
2251                         virConnectPtr dconn,
2252                         const char *cookiein,
2256                         virDomainDefPtr *def,
2257                         const char *origname,
2259                         unsigned short port,
2261                         const char *listenAddress,
2262                         unsigned long flags)
2264     virDomainObjPtr vm = NULL;
2265     virObjectEventPtr event = NULL;
2267     int dataFD[2] = { -1, -1 };
2268     qemuDomainObjPrivatePtr priv = NULL;
2269     unsigned long long now;
2270     qemuMigrationCookiePtr mig = NULL;
2272     char *xmlout = NULL;
2273     unsigned int cookieFlags;
2274     virCapsPtr caps = NULL;
2275     char *migrateFrom = NULL;
2276     bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
2277     bool taint_hook = false;
2279     if (virTimeMillisNow(&now) < 0)
     /* Same offline-migration restrictions as checked on the source */
2282     if (flags & VIR_MIGRATE_OFFLINE) {
2283         if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
2284                      VIR_MIGRATE_NON_SHARED_INC)) {
2285             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2286                            _("offline migration cannot handle "
2287                              "non-shared storage"));
2290         if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
2291             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2292                            _("offline migration must be specified with "
2293                              "the persistent flag set"));
2297             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
2298                            _("tunnelled offline migration does not "
2304     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
2307     if (!qemuMigrationIsAllowed(driver, NULL, *def, true, abort_on_error))
2310     /* Let migration hook filter domain XML */
2311     if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
2315         if (!(xml = qemuDomainDefFormatXML(driver, *def,
2316                                            VIR_DOMAIN_XML_SECURE |
2317                                            VIR_DOMAIN_XML_MIGRATABLE)))
2320         hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
2321                               VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
2322                               NULL, xml, &xmlout);
2327         } else if (hookret == 0) {
2329                 VIR_DEBUG("Migrate hook filter returned nothing; using the"
         /* Hook emitted replacement XML: reparse and verify ABI match */
2332                 virDomainDefPtr newdef;
2334                 VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
2335                 newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt,
2336                                                  QEMU_EXPECTED_VIRT_TYPES,
2337                                                  VIR_DOMAIN_XML_INACTIVE);
2341                 if (!qemuDomainDefCheckABIStability(driver, *def, newdef)) {
2342                     virDomainDefFree(newdef);
2346                 virDomainDefFree(*def);
2348                 /* We should taint the domain here. However, @vm and therefore
2349                  * privateData too are still NULL, so just notice the fact and
2350                  * taint it later. */
     /* Tunnelled case: data arrives on a pipe fed by the virStream */
2357         /* QEMU will be started with -incoming stdio
2358          * (which qemu_command might convert to exec:cat or fd:n)
2360         if (VIR_STRDUP(migrateFrom, "stdio") < 0)
2363         virSocketAddr listenAddressSocket;
2364         bool encloseAddress = false;
2365         bool hostIPv6Capable = false;
2366         bool qemuIPv6Capable = false;
2367         virQEMUCapsPtr qemuCaps = NULL;
2368         struct addrinfo *info = NULL;
2369         struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
2370                                   .ai_socktype = SOCK_STREAM };
         /* Probe whether the host has any configured IPv6 address */
2372         if (getaddrinfo("::", NULL, &hints, &info) == 0) {
2374             hostIPv6Capable = true;
2376         if (!(qemuCaps = virQEMUCapsCacheLookupCopy(driver->qemuCapsCache,
2380         qemuIPv6Capable = virQEMUCapsGet(qemuCaps, QEMU_CAPS_IPV6_MIGRATION);
2381         virObjectUnref(qemuCaps);
2383         if (listenAddress) {
2384             if (virSocketAddrIsNumeric(listenAddress)) {
2385                 /* listenAddress is numeric IPv4 or IPv6 */
2386                 if (virSocketAddrParse(&listenAddressSocket, listenAddress, AF_UNSPEC) < 0)
2389                 /* address parsed successfully */
2390                 if (VIR_SOCKET_ADDR_IS_FAMILY(&listenAddressSocket, AF_INET6)) {
2391                     if (!qemuIPv6Capable) {
2392                         virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
2393                                        _("qemu isn't capable of IPv6"));
2396                     if (!hostIPv6Capable) {
2397                         virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
2398                                        _("host isn't capable of IPv6"));
2401                     /* IPv6 address must be escaped in brackets on the cmd line */
2402                     encloseAddress = true;
2405                 /* listenAddress is a hostname */
2408             /* Listen on :: instead of 0.0.0.0 if QEMU understands it
2409              * and there is at least one IPv6 address configured
2411             listenAddress = qemuIPv6Capable && hostIPv6Capable ?
2412                 encloseAddress = true, "::" : "0.0.0.0";
2415         /* QEMU will be started with -incoming [<IPv6 addr>]:port,
2416          * -incoming <IPv4 addr>:port or -incoming <hostname>:port
2418         if ((encloseAddress &&
2419              virAsprintf(&migrateFrom, "tcp:[%s]:%d", listenAddress, port) < 0) ||
2421              virAsprintf(&migrateFrom, "tcp:%s:%d", listenAddress, port) < 0))
     /* Register the inactive domain; consumes *def on success */
2425     if (!(vm = virDomainObjListAdd(driver->domains, *def,
2427                                    VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
2428                                    VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
2433     priv = vm->privateData;
2434     if (VIR_STRDUP(priv->origname, origname) < 0)
2438         /* Domain XML has been altered by a hook script. */
2439         priv->hookRun = true;
2442     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
2443                                        QEMU_MIGRATION_COOKIE_LOCKSTATE |
2444                                        QEMU_MIGRATION_COOKIE_NBD)))
2447     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
2449     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);
2451     /* Domain starts inactive, even if the domain XML had an id field. */
2454     if (flags & VIR_MIGRATE_OFFLINE)
     /* Tunnelled: create the pipe QEMU will read migration data from */
2458         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
2459         virReportSystemError(errno, "%s",
2460                              _("cannot create pipe for tunnelled migration"));
2464     /* Start the QEMU daemon, with the same command-line arguments plus
2465      * -incoming $migrateFrom
2467     if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
2468                          VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
2469                          VIR_QEMU_PROCESS_START_PAUSED |
2470                          VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
2471         virDomainAuditStart(vm, "migrated", false);
2476         if (virFDStreamOpen(st, dataFD[1]) < 0) {
2477             virReportSystemError(errno, "%s",
2478                                  _("cannot pass pipe for tunnelled migration"));
2481         dataFD[1] = -1; /* 'st' owns the FD now & will close it */
2484     if (flags & VIR_MIGRATE_COMPRESSED &&
2485         qemuMigrationSetCompression(driver, vm,
2486                                     QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
     /* Transfer ownership of the lock-manager state from the cookie */
2489     if (mig->lockState) {
2490         VIR_DEBUG("Received lockstate %s", mig->lockState);
2491         VIR_FREE(priv->lockState);
2492         priv->lockState = mig->lockState;
2493         mig->lockState = NULL;
2495         VIR_DEBUG("Received no lockstate");
2499     if (flags & VIR_MIGRATE_OFFLINE)
2502     cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
     /* Source asked for storage migration and QEMU can serve NBD */
2505         flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
2506         virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
2507         if (qemuMigrationStartNBDServer(driver, vm, listenAddress) < 0) {
2508             /* error already reported */
2511         cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
2514     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout,
2515                                 cookieoutlen, cookieFlags) < 0) {
2516         /* We could tear down the whole guest here, but
2517          * cookie data is (so far) non-critical, so that
2518          * seems a little harsh. We'll just warn for now.
2520         VIR_WARN("Unable to encode migration cookie");
2523     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
2526     if (!(flags & VIR_MIGRATE_OFFLINE)) {
2527         virDomainAuditStart(vm, "migrated", true);
2528         event = virDomainEventLifecycleNewFromObj(vm,
2529                                          VIR_DOMAIN_EVENT_STARTED,
2530                                          VIR_DOMAIN_EVENT_STARTED_MIGRATED);
2533     /* We keep the job active across API calls until the finish() call.
2534      * This prevents any other APIs being invoked while incoming
2535      * migration is taking place.
2537     if (!qemuMigrationJobContinue(vm)) {
2539         virReportError(VIR_ERR_OPERATION_FAILED,
2540                        "%s", _("domain disappeared"));
2545     priv->migrationPort = port;
2549     VIR_FREE(migrateFrom);
2551     VIR_FORCE_CLOSE(dataFD[0]);
2552     VIR_FORCE_CLOSE(dataFD[1]);
2555         virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
     /* Keep a transient failed domain only if it is persistent */
2558         if (ret >= 0 || vm->persistent)
2559             virObjectUnlock(vm);
2561             qemuDomainRemoveInactive(driver, vm);
2564         qemuDomainEventQueue(driver, event);
2565     qemuMigrationCookieFree(mig);
2566     virObjectUnref(caps);
     /* Failure after QEMU started: stop it and audit the failed start */
2570     virDomainAuditStart(vm, "migrated", false);
2571     qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
2574     if (!qemuMigrationJobFinish(driver, vm)) {
2582  * This version starts an empty VM listening on a localhost TCP port, and
2583  * sets up the corresponding virStream to handle the incoming data.
 *
 * Tunnelled Prepare: requires a non-NULL stream and delegates to
 * qemuMigrationPrepareAny() with port 0 / no listen address.
 */
2586 qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
2587                            virConnectPtr dconn,
2588                            const char *cookiein,
2593                            virDomainDefPtr *def,
2594                            const char *origname,
2595                            unsigned long flags)
2599     VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
2600               "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
2601               "origname=%s, flags=%lx",
2602               driver, dconn, NULLSTR(cookiein), cookieinlen,
2603               cookieout, cookieoutlen, st, *def, origname, flags);
     /* Tunnelled migration is meaningless without a stream to read from */
2606         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
2607                        _("tunnelled migration requested but NULL stream passed"));
2611     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
2612                                   cookieout, cookieoutlen, def, origname,
2613                                   st, 0, false, NULL, flags);
/* Direct (non-tunnelled) Prepare: parse or synthesize the tcp: incoming
 * URI, allocate a migration port when none was given, emit *uri_out for
 * the source, then delegate to qemuMigrationPrepareAny().
 * NOTE(review): elided excerpt — gotos, the cleanup label and some
 * branch structure are missing between the visible lines. */
2619 qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
2620                            virConnectPtr dconn,
2621                            const char *cookiein,
2627                            virDomainDefPtr *def,
2628                            const char *origname,
2629                            const char *listenAddress,
2630                            unsigned long flags)
2632     unsigned short port = 0;
2633     bool autoPort = true;
2634     char *hostname = NULL;
2636     char *uri_str = NULL;
2638     virURIPtr uri = NULL;
2639     bool well_formed_uri = true;
2640     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
2641     const char *migrateHost = cfg->migrateHost;
2643     VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
2644               "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
2645               "def=%p, origname=%s, listenAddress=%s, flags=%lx",
2646               driver, dconn, NULLSTR(cookiein), cookieinlen,
2647               cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
2648               *def, origname, NULLSTR(listenAddress), flags);
2652     /* The URI passed in may be NULL or a string "tcp://somehostname:port".
2654      * If the URI passed in is NULL then we allocate a port number
2655      * from our pool of port numbers, and if the migrateHost is configured,
2656      * we return a URI of "tcp://migrateHost:port", otherwise return a URI
2657      * of "tcp://ourhostname:port".
2659      * If the URI passed in is not NULL then we try to parse out the
2660      * port number and use that (note that the hostname is assumed
2661      * to be a correct hostname which refers to the target machine).
2663     if (uri_in == NULL) {
2664         if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
         /* Prefer the admin-configured migrate_host over our hostname */
2667         if (migrateHost != NULL) {
2668             if (virSocketAddrIsNumeric(migrateHost) &&
2669                 virSocketAddrParse(NULL, migrateHost, AF_UNSPEC) < 0)
2672             if (VIR_STRDUP(hostname, migrateHost) < 0)
2675             if ((hostname = virGetHostname()) == NULL)
         /* A localhost result is useless to the remote source host */
2679         if (STRPREFIX(hostname, "localhost")) {
2680             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
2681                            _("hostname on destination resolved to localhost,"
2682                              " but migration requires an FQDN"));
2686         /* XXX this really should have been a properly well-formed
2687          * URI, but we can't add in tcp:// now without breaking
2688          * compatibility with old targets. We at least make the
2689          * new targets accept both syntaxes though.
2692         if (virAsprintf(uri_out, "tcp:%s:%d", hostname, port) < 0)
2695         /* Check the URI starts with "tcp:". We will escape the
2696          * URI when passing it to the qemu monitor, so bad
2697          * characters in hostname part don't matter.
2699         if (!(p = STRSKIP(uri_in, "tcp:"))) {
2700             virReportError(VIR_ERR_INVALID_ARG, "%s",
2701                            _("only tcp URIs are supported for KVM/QEMU"
2706         /* Convert uri_in to well-formed URI with // after tcp: */
2707         if (!(STRPREFIX(uri_in, "tcp://"))) {
2708             well_formed_uri = false;
2709             if (virAsprintf(&uri_str, "tcp://%s", p) < 0)
2713         uri = virURIParse(uri_str ? uri_str : uri_in);
2717             virReportError(VIR_ERR_INVALID_ARG, _("unable to parse URI: %s"),
2722         if (uri->server == NULL) {
2723             virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
2724                                                   " URI: %s"), uri_in);
         /* No port in the URI: pick one from the pool and echo it back */
2728         if (uri->port == 0) {
2729             if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
2732             if (well_formed_uri) {
2736                 if (!(*uri_out = virURIFormat(uri)))
2740                 if (virAsprintf(uri_out, "%s:%d", uri_in, port) < 0)
2751         VIR_DEBUG("Generated uri_out=%s", *uri_out);
2753     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
2754                                   cookieout, cookieoutlen, def, origname,
2755                                   NULL, port, autoPort, listenAddress, flags);
2759     virObjectUnref(cfg);
     /* On failure release any auto-allocated port */
2763         virPortAllocatorRelease(driver->migrationPorts, port);
/* Parse the incoming domain XML into a virDomainDef, optionally
 * renaming it to dname (remembering the original name via origname).
 * Returns the parsed def or NULL on error.
 * NOTE(review): elided excerpt — the NULL dom_xml guard's condition and
 * returns are not all visible. */
2770 qemuMigrationPrepareDef(virQEMUDriverPtr driver,
2771                         const char *dom_xml,
2775     virCapsPtr caps = NULL;
2776     virDomainDefPtr def;
2780         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
2781                        _("no domain XML passed"));
2785     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
2788     if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt,
2789                                         QEMU_EXPECTED_VIRT_TYPES,
2790                                         VIR_DOMAIN_XML_INACTIVE)))
     /* Caller asked for a rename on the destination */
2795         if (VIR_STRDUP(def->name, dname) < 0) {
2796             virDomainDefFree(def);
2802     virObjectUnref(caps);
2803     if (def && origname)
/* Confirm phase on the source: if migration succeeded (retcode == 0),
 * wait for SPICE data transfer, stop the guest and emit STOPPED; if it
 * failed, cancel NBD mirror jobs and resume the guest, emitting RESUMED.
 * NOTE(review): elided excerpt — the retcode branch structure, gotos
 * and the return are not all visible. */
2812 qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
2815                           const char *cookiein,
2820     qemuMigrationCookiePtr mig;
2821     virObjectEventPtr event = NULL;
2823     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
2825     VIR_DEBUG("driver=%p, conn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
2826               "flags=%x, retcode=%d",
2827               driver, conn, vm, NULLSTR(cookiein), cookieinlen,
2830     virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
2832     qemuMigrationJobSetPhase(driver, vm,
2834                              ? QEMU_MIGRATION_PHASE_CONFIRM3
2835                              : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);
2837     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
     /* Offline migration never touched the running guest; nothing to do */
2840     if (flags & VIR_MIGRATE_OFFLINE)
2843     /* Did the migration go as planned? If yes, kill off the
2844      * domain object, but if no, resume CPUs
2847         /* If guest uses SPICE and supports seamless migration we have to hold
2848          * up domain shutdown until SPICE server transfers its data */
2849         qemuMigrationWaitForSpice(driver, vm);
2851         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
2852                         VIR_QEMU_PROCESS_STOP_MIGRATED);
2853         virDomainAuditStop(vm, "migrated");
2855         event = virDomainEventLifecycleNewFromObj(vm,
2856                                          VIR_DOMAIN_EVENT_STOPPED,
2857                                          VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
2860         /* cancel any outstanding NBD jobs */
2861         qemuMigrationCancelDriveMirror(mig, driver, vm);
         /* Failure path: bring the source guest back to its prior state */
2863         if (qemuMigrationRestoreDomainState(conn, vm)) {
2864             event = virDomainEventLifecycleNewFromObj(vm,
2865                                              VIR_DOMAIN_EVENT_RESUMED,
2866                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
2869         if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
2870             VIR_WARN("Failed to save status on vm %s", vm->def->name);
2876     qemuMigrationCookieFree(mig);
2881         qemuDomainEventQueue(driver, event);
2882     virObjectUnref(cfg);
2887 qemuMigrationConfirm(virConnectPtr conn,
2889 const char *cookiein,
2894 virQEMUDriverPtr driver = conn->privateData;
2895 enum qemuMigrationJobPhase phase;
2896 virQEMUDriverConfigPtr cfg = NULL;
2899 cfg = virQEMUDriverGetConfig(driver);
2901 if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
2905 phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
2907 phase = QEMU_MIGRATION_PHASE_CONFIRM3;
2909 qemuMigrationJobStartPhase(driver, vm, phase);
2910 virCloseCallbacksUnset(driver->closeCallbacks, vm,
2911 qemuMigrationCleanup);
2913 ret = qemuMigrationConfirmPhase(driver, conn, vm,
2914 cookiein, cookieinlen,
2917 if (qemuMigrationJobFinish(driver, vm) == 0) {
2919 } else if (!virDomainObjIsActive(vm) &&
2920 (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
2921 if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
2922 virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
2923 qemuDomainRemoveInactive(driver, vm);
2929 virObjectUnlock(vm);
2930 virObjectUnref(cfg);
2935 enum qemuMigrationDestinationType {
2936 MIGRATION_DEST_HOST,
2937 MIGRATION_DEST_CONNECT_HOST,
2938 MIGRATION_DEST_UNIX,
2942 enum qemuMigrationForwardType {
2943 MIGRATION_FWD_DIRECT,
2944 MIGRATION_FWD_STREAM,
2947 typedef struct _qemuMigrationSpec qemuMigrationSpec;
2948 typedef qemuMigrationSpec *qemuMigrationSpecPtr;
2949 struct _qemuMigrationSpec {
2950 enum qemuMigrationDestinationType destType;
2968 enum qemuMigrationForwardType fwdType;
2970 virStreamPtr stream;
2974 #define TUNNEL_SEND_BUF_SIZE 65536
2976 typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
2977 typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
2978 struct _qemuMigrationIOThread {
2987 static void qemuMigrationIOFunc(void *arg)
2989 qemuMigrationIOThreadPtr data = arg;
2990 char *buffer = NULL;
2991 struct pollfd fds[2];
2993 virErrorPtr err = NULL;
2995 VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
2996 data->st, data->sock);
2998 if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
3001 fds[0].fd = data->sock;
3002 fds[1].fd = data->wakeupRecvFD;
3007 fds[0].events = fds[1].events = POLLIN;
3008 fds[0].revents = fds[1].revents = 0;
3010 ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);
3013 if (errno == EAGAIN || errno == EINTR)
3015 virReportSystemError(errno, "%s",
3016 _("poll failed in migration tunnel"));
3021 /* We were asked to gracefully stop but reading would block. This
3022 * can only happen if qemu told us migration finished but didn't
3023 * close the migration fd. We handle this in the same way as EOF.
3025 VIR_DEBUG("QEMU forgot to close migration fd");
3029 if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
3032 if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
3033 virReportSystemError(errno, "%s",
3034 _("failed to read from wakeup fd"));
3038 VIR_DEBUG("Migration tunnel was asked to %s",
3039 stop ? "abort" : "finish");
3047 if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
3050 nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
3052 if (virStreamSend(data->st, buffer, nbytes) < 0)
3054 } else if (nbytes < 0) {
3055 virReportSystemError(errno, "%s",
3056 _("tunnelled migration failed to read from qemu"));
3059 /* EOF; get out of here */
3065 if (virStreamFinish(data->st) < 0)
3073 err = virSaveLastError();
3074 if (err && err->code == VIR_ERR_OK) {
3078 virStreamAbort(data->st);
3085 virCopyLastError(&data->err);
3086 virResetLastError();
3091 static qemuMigrationIOThreadPtr
3092 qemuMigrationStartTunnel(virStreamPtr st,
3095 qemuMigrationIOThreadPtr io = NULL;
3096 int wakeupFD[2] = { -1, -1 };
3098 if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
3099 virReportSystemError(errno, "%s",
3100 _("Unable to make pipe"));
3104 if (VIR_ALLOC(io) < 0)
3109 io->wakeupRecvFD = wakeupFD[0];
3110 io->wakeupSendFD = wakeupFD[1];
3112 if (virThreadCreate(&io->thread, true,
3113 qemuMigrationIOFunc,
3115 virReportSystemError(errno, "%s",
3116 _("Unable to create migration thread"));
3123 VIR_FORCE_CLOSE(wakeupFD[0]);
3124 VIR_FORCE_CLOSE(wakeupFD[1]);
3130 qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
3133 char stop = error ? 1 : 0;
3135 /* make sure the thread finishes its job and is joinable */
3136 if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
3137 virReportSystemError(errno, "%s",
3138 _("failed to wakeup migration tunnel"));
3142 virThreadJoin(&io->thread);
3144 /* Forward error from the IO thread, to this thread */
3145 if (io->err.code != VIR_ERR_OK) {
3149 virSetError(&io->err);
3150 virResetError(&io->err);
3157 VIR_FORCE_CLOSE(io->wakeupSendFD);
3158 VIR_FORCE_CLOSE(io->wakeupRecvFD);
3164 qemuMigrationConnect(virQEMUDriverPtr driver,
3166 qemuMigrationSpecPtr spec)
3168 virNetSocketPtr sock;
3173 host = spec->dest.host.name;
3174 if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
3177 spec->destType = MIGRATION_DEST_FD;
3178 spec->dest.fd.qemu = -1;
3180 if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
3182 if (virNetSocketNewConnectTCP(host, port, &sock) == 0) {
3183 spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
3184 virObjectUnref(sock);
3186 if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
3187 spec->dest.fd.qemu == -1)
3190 /* Migration expects a blocking FD */
3191 if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
3192 virReportSystemError(errno, _("Unable to set FD %d blocking"),
3193 spec->dest.fd.qemu);
3202 VIR_FORCE_CLOSE(spec->dest.fd.qemu);
3207 qemuMigrationRun(virQEMUDriverPtr driver,
3209 const char *cookiein,
3213 unsigned long flags,
3214 unsigned long resource,
3215 qemuMigrationSpecPtr spec,
3216 virConnectPtr dconn,
3217 const char *graphicsuri)
3220 unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
3221 qemuDomainObjPrivatePtr priv = vm->privateData;
3222 qemuMigrationCookiePtr mig = NULL;
3223 qemuMigrationIOThreadPtr iothread = NULL;
3225 unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
3226 virErrorPtr orig_err = NULL;
3227 unsigned int cookieFlags = 0;
3228 bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
3231 VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
3232 "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
3233 "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s",
3234 driver, vm, NULLSTR(cookiein), cookieinlen,
3235 cookieout, cookieoutlen, flags, resource,
3236 spec, spec->destType, spec->fwdType, dconn,
3237 NULLSTR(graphicsuri));
3239 if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
3240 migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
3241 cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
3244 if (flags & VIR_MIGRATE_NON_SHARED_INC) {
3245 migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
3246 cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
3249 if (virLockManagerPluginUsesState(driver->lockManager) &&
3251 virReportError(VIR_ERR_INTERNAL_ERROR,
3252 _("Migration with lock driver %s requires"
3254 virLockManagerPluginGetName(driver->lockManager));
3258 mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
3259 cookieFlags | QEMU_MIGRATION_COOKIE_GRAPHICS);
3263 if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
3264 VIR_WARN("unable to provide data for graphics client relocation");
3266 /* this will update migrate_flags on success */
3267 if (qemuMigrationDriveMirror(driver, vm, mig, spec->dest.host.name,
3268 migrate_speed, &migrate_flags) < 0) {
3269 /* error reported by helper func */
3273 /* Before EnterMonitor, since qemuMigrationSetOffline already does that */
3274 if (!(flags & VIR_MIGRATE_LIVE) &&
3275 virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
3276 if (qemuMigrationSetOffline(driver, vm) < 0)
3280 if (flags & VIR_MIGRATE_COMPRESSED &&
3281 qemuMigrationSetCompression(driver, vm,
3282 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3285 if (flags & VIR_MIGRATE_AUTO_CONVERGE &&
3286 qemuMigrationSetAutoConverge(driver, vm,
3287 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3290 if (qemuDomainObjEnterMonitorAsync(driver, vm,
3291 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3294 if (priv->job.asyncAbort) {
3295 /* explicitly do this *after* we entered the monitor,
3296 * as this is a critical section so we are guaranteed
3297 * priv->job.asyncAbort will not change */
3298 qemuDomainObjExitMonitor(driver, vm);
3299 virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
3300 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
3301 _("canceled by client"));
3305 if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0) {
3306 qemuDomainObjExitMonitor(driver, vm);
3310 /* connect to the destination qemu if needed */
3311 if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
3312 qemuMigrationConnect(driver, vm, spec) < 0) {
3313 qemuDomainObjExitMonitor(driver, vm);
3317 switch (spec->destType) {
3318 case MIGRATION_DEST_HOST:
3319 ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
3320 spec->dest.host.name,
3321 spec->dest.host.port);
3324 case MIGRATION_DEST_CONNECT_HOST:
3325 /* handled above and transformed into MIGRATION_DEST_FD */
3328 case MIGRATION_DEST_UNIX:
3329 if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
3330 ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
3331 spec->dest.unix_socket.file);
3333 const char *args[] = {
3334 "nc", "-U", spec->dest.unix_socket.file, NULL
3336 ret = qemuMonitorMigrateToCommand(priv->mon, migrate_flags, args);
3340 case MIGRATION_DEST_FD:
3341 if (spec->fwdType != MIGRATION_FWD_DIRECT) {
3342 fd = spec->dest.fd.local;
3343 spec->dest.fd.local = -1;
3345 ret = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
3346 spec->dest.fd.qemu);
3347 VIR_FORCE_CLOSE(spec->dest.fd.qemu);
3350 qemuDomainObjExitMonitor(driver, vm);
3355 if (!virDomainObjIsActive(vm)) {
3356 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
3357 _("guest unexpectedly quit"));
3361 /* From this point onwards we *must* call cancel to abort the
3362 * migration on source if anything goes wrong */
3364 if (spec->destType == MIGRATION_DEST_UNIX) {
3365 /* It is also possible that the migrate didn't fail initially, but
3366 * rather failed later on. Check its status before waiting for a
3367 * connection from qemu which may never be initiated.
3369 if (qemuMigrationUpdateJobStatus(driver, vm, _("migration job"),
3370 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3373 while ((fd = accept(spec->dest.unix_socket.sock, NULL, NULL)) < 0) {
3374 if (errno == EAGAIN || errno == EINTR)
3376 virReportSystemError(errno, "%s",
3377 _("failed to accept connection from qemu"));
3382 if (spec->fwdType != MIGRATION_FWD_DIRECT &&
3383 !(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
3386 rc = qemuMigrationWaitForCompletion(driver, vm,
3387 QEMU_ASYNC_JOB_MIGRATION_OUT,
3388 dconn, abort_on_error);
3394 /* When migration completed, QEMU will have paused the
3395 * CPUs for us, but unless we're using the JSON monitor
3396 * we won't have been notified of this, so might still
3397 * think we're running. For v2 protocol this doesn't
3398 * matter because we'll kill the VM soon, but for v3
3399 * this is important because we stay paused until the
3400 * confirm3 step, but need to release the lock state
3402 if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
3403 if (qemuMigrationSetOffline(driver, vm) < 0)
3410 if (ret < 0 && !orig_err)
3411 orig_err = virSaveLastError();
3413 /* cancel any outstanding NBD jobs */
3414 qemuMigrationCancelDriveMirror(mig, driver, vm);
3416 if (spec->fwdType != MIGRATION_FWD_DIRECT) {
3417 if (iothread && qemuMigrationStopTunnel(iothread, ret < 0) < 0)
3419 VIR_FORCE_CLOSE(fd);
3422 cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK;
3423 if (flags & VIR_MIGRATE_PERSIST_DEST)
3424 cookieFlags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
3426 qemuMigrationBakeCookie(mig, driver, vm, cookieout,
3427 cookieoutlen, cookieFlags) < 0) {
3428 VIR_WARN("Unable to encode migration cookie");
3431 qemuMigrationCookieFree(mig);
3434 virSetError(orig_err);
3435 virFreeError(orig_err);
3441 orig_err = virSaveLastError();
3443 if (virDomainObjIsActive(vm)) {
3444 if (qemuDomainObjEnterMonitorAsync(driver, vm,
3445 QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
3446 qemuMonitorMigrateCancel(priv->mon);
3447 qemuDomainObjExitMonitor(driver, vm);
3453 /* Perform migration using QEMU's native TCP migrate support,
3454 * not encrypted obviously
3456 static int doNativeMigrate(virQEMUDriverPtr driver,
3459 const char *cookiein,
3463 unsigned long flags,
3464 unsigned long resource,
3465 virConnectPtr dconn,
3466 const char *graphicsuri)
3468 qemuDomainObjPrivatePtr priv = vm->privateData;
3469 virURIPtr uribits = NULL;
3471 qemuMigrationSpec spec;
3473 VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
3474 "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
3476 driver, vm, uri, NULLSTR(cookiein), cookieinlen,
3477 cookieout, cookieoutlen, flags, resource,
3478 NULLSTR(graphicsuri));
3480 if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
3482 /* HACK: source host generates bogus URIs, so fix them up */
3483 if (virAsprintf(&tmp, "tcp://%s", uri + strlen("tcp:")) < 0)
3485 uribits = virURIParse(tmp);
3488 uribits = virURIParse(uri);
3493 if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
3494 spec.destType = MIGRATION_DEST_CONNECT_HOST;
3496 spec.destType = MIGRATION_DEST_HOST;
3497 spec.dest.host.name = uribits->server;
3498 spec.dest.host.port = uribits->port;
3499 spec.fwdType = MIGRATION_FWD_DIRECT;
3501 ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
3502 cookieoutlen, flags, resource, &spec, dconn,
3505 if (spec.destType == MIGRATION_DEST_FD)
3506 VIR_FORCE_CLOSE(spec.dest.fd.qemu);
3508 virURIFree(uribits);
3514 static int doTunnelMigrate(virQEMUDriverPtr driver,
3517 const char *cookiein,
3521 unsigned long flags,
3522 unsigned long resource,
3523 virConnectPtr dconn,
3524 const char *graphicsuri)
3526 qemuDomainObjPrivatePtr priv = vm->privateData;
3527 virNetSocketPtr sock = NULL;
3529 qemuMigrationSpec spec;
3530 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
3532 VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
3533 "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
3535 driver, vm, st, NULLSTR(cookiein), cookieinlen,
3536 cookieout, cookieoutlen, flags, resource,
3537 NULLSTR(graphicsuri));
3539 if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
3540 !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
3541 !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
3542 virReportError(VIR_ERR_OPERATION_FAILED, "%s",
3543 _("Source qemu is too old to support tunnelled migration"));
3544 virObjectUnref(cfg);
3548 spec.fwdType = MIGRATION_FWD_STREAM;
3549 spec.fwd.stream = st;
3551 if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD)) {
3554 spec.destType = MIGRATION_DEST_FD;
3555 spec.dest.fd.qemu = -1;
3556 spec.dest.fd.local = -1;
3558 if (pipe2(fds, O_CLOEXEC) == 0) {
3559 spec.dest.fd.qemu = fds[1];
3560 spec.dest.fd.local = fds[0];
3562 if (spec.dest.fd.qemu == -1 ||
3563 virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
3564 spec.dest.fd.qemu) < 0) {
3565 virReportSystemError(errno, "%s",
3566 _("cannot create pipe for tunnelled migration"));
3570 spec.destType = MIGRATION_DEST_UNIX;
3571 spec.dest.unix_socket.sock = -1;
3572 spec.dest.unix_socket.file = NULL;
3574 if (virAsprintf(&spec.dest.unix_socket.file,
3575 "%s/qemu.tunnelmigrate.src.%s",
3576 cfg->libDir, vm->def->name) < 0)
3579 if (virNetSocketNewListenUNIX(spec.dest.unix_socket.file, 0700,
3580 cfg->user, cfg->group,
3582 virNetSocketListen(sock, 1) < 0)
3585 spec.dest.unix_socket.sock = virNetSocketGetFD(sock);
3588 ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
3589 cookieoutlen, flags, resource, &spec, dconn,
3593 if (spec.destType == MIGRATION_DEST_FD) {
3594 VIR_FORCE_CLOSE(spec.dest.fd.qemu);
3595 VIR_FORCE_CLOSE(spec.dest.fd.local);
3597 virObjectUnref(sock);
3598 VIR_FREE(spec.dest.unix_socket.file);
3601 virObjectUnref(cfg);
3606 /* This is essentially a re-impl of virDomainMigrateVersion2
3607 * from libvirt.c, but running in source libvirtd context,
3608 * instead of client app context & also adding in tunnel
3610 static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
3611 virConnectPtr sconn ATTRIBUTE_UNUSED,
3612 virConnectPtr dconn,
3614 const char *dconnuri,
3615 unsigned long flags,
3617 unsigned long resource)
3619 virDomainPtr ddomain = NULL;
3620 char *uri_out = NULL;
3621 char *cookie = NULL;
3622 char *dom_xml = NULL;
3623 int cookielen = 0, ret;
3624 virErrorPtr orig_err = NULL;
3626 virStreamPtr st = NULL;
3627 unsigned long destflags;
3629 VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
3630 "flags=%lx, dname=%s, resource=%lu",
3631 driver, sconn, dconn, vm, NULLSTR(dconnuri),
3632 flags, NULLSTR(dname), resource);
3634 /* In version 2 of the protocol, the prepare step is slightly
3635 * different. We fetch the domain XML of the source domain
3636 * and pass it to Prepare2.
3638 if (!(dom_xml = qemuDomainFormatXML(driver, vm,
3639 QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
3640 VIR_DOMAIN_XML_MIGRATABLE)))
3643 if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
3644 flags |= VIR_MIGRATE_PAUSED;
3646 destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
3647 VIR_MIGRATE_AUTO_CONVERGE);
3649 VIR_DEBUG("Prepare2 %p", dconn);
3650 if (flags & VIR_MIGRATE_TUNNELLED) {
3652 * Tunnelled Migrate Version 2 does not support cookies
3653 * due to missing parameters in the prepareTunnel() API.
3656 if (!(st = virStreamNew(dconn, 0)))
3659 qemuDomainObjEnterRemote(vm);
3660 ret = dconn->driver->domainMigratePrepareTunnel
3661 (dconn, st, destflags, dname, resource, dom_xml);
3662 qemuDomainObjExitRemote(vm);
3664 qemuDomainObjEnterRemote(vm);
3665 ret = dconn->driver->domainMigratePrepare2
3666 (dconn, &cookie, &cookielen, NULL, &uri_out,
3667 destflags, dname, resource, dom_xml);
3668 qemuDomainObjExitRemote(vm);
3674 /* the domain may have shutdown or crashed while we had the locks dropped
3675 * in qemuDomainObjEnterRemote, so check again
3677 if (!virDomainObjIsActive(vm)) {
3678 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
3679 _("guest unexpectedly quit"));
3683 if (!(flags & VIR_MIGRATE_TUNNELLED) &&
3684 (uri_out == NULL)) {
3685 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
3686 _("domainMigratePrepare2 did not set uri"));
3688 orig_err = virSaveLastError();
3692 /* Perform the migration. The driver isn't supposed to return
3693 * until the migration is complete.
3695 VIR_DEBUG("Perform %p", sconn);
3696 qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
3697 if (flags & VIR_MIGRATE_TUNNELLED)
3698 ret = doTunnelMigrate(driver, vm, st,
3699 NULL, 0, NULL, NULL,
3700 flags, resource, dconn, NULL);
3702 ret = doNativeMigrate(driver, vm, uri_out,
3704 NULL, NULL, /* No out cookie with v2 migration */
3705 flags, resource, dconn, NULL);
3707 /* Perform failed. Make sure Finish doesn't overwrite the error */
3709 orig_err = virSaveLastError();
3711 /* If Perform returns < 0, then we need to cancel the VM
3712 * startup on the destination
3714 cancelled = ret < 0;
3717 /* In version 2 of the migration protocol, we pass the
3718 * status code from the sender to the destination host,
3719 * so it can do any cleanup if the migration failed.
3721 dname = dname ? dname : vm->def->name;
3722 VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
3723 qemuDomainObjEnterRemote(vm);
3724 ddomain = dconn->driver->domainMigrateFinish2
3725 (dconn, dname, cookie, cookielen,
3726 uri_out ? uri_out : dconnuri, destflags, cancelled);
3727 qemuDomainObjExitRemote(vm);
3728 if (cancelled && ddomain)
3729 VIR_ERROR(_("finish step ignored that migration was cancelled"));
3733 virObjectUnref(ddomain);
3742 virSetError(orig_err);
3743 virFreeError(orig_err);
3752 /* This is essentially a re-impl of virDomainMigrateVersion3
3753 * from libvirt.c, but running in source libvirtd context,
3754 * instead of client app context & also adding in tunnel
3757 doPeer2PeerMigrate3(virQEMUDriverPtr driver,
3758 virConnectPtr sconn,
3759 virConnectPtr dconn,
3760 const char *dconnuri,
3765 const char *graphicsuri,
3766 const char *listenAddress,
3767 unsigned long long bandwidth,
3769 unsigned long flags)
3771 virDomainPtr ddomain = NULL;
3772 char *uri_out = NULL;
3773 char *cookiein = NULL;
3774 char *cookieout = NULL;
3775 char *dom_xml = NULL;
3776 int cookieinlen = 0;
3777 int cookieoutlen = 0;
3779 virErrorPtr orig_err = NULL;
3780 bool cancelled = true;
3781 virStreamPtr st = NULL;
3782 unsigned long destflags;
3783 virTypedParameterPtr params = NULL;
3787 VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
3788 "dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
3789 "bandwidth=%llu, useParams=%d, flags=%lx",
3790 driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
3791 NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
3792 NULLSTR(listenAddress), bandwidth, useParams, flags);
3794 /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
3795 * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
3796 * bit here, because we are already running inside the context of
3799 dom_xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
3800 &cookieout, &cookieoutlen, flags);
3805 if (virTypedParamsAddString(¶ms, &nparams, &maxparams,
3806 VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
3810 virTypedParamsAddString(¶ms, &nparams, &maxparams,
3811 VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
3815 virTypedParamsAddString(¶ms, &nparams, &maxparams,
3816 VIR_MIGRATE_PARAM_URI, uri) < 0)
3820 virTypedParamsAddULLong(¶ms, &nparams, &maxparams,
3821 VIR_MIGRATE_PARAM_BANDWIDTH,
3826 virTypedParamsAddString(¶ms, &nparams, &maxparams,
3827 VIR_MIGRATE_PARAM_GRAPHICS_URI,
3830 if (listenAddress &&
3831 virTypedParamsAddString(¶ms, &nparams, &maxparams,
3832 VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
3837 if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
3838 flags |= VIR_MIGRATE_PAUSED;
3840 destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
3841 VIR_MIGRATE_AUTO_CONVERGE);
3843 VIR_DEBUG("Prepare3 %p", dconn);
3844 cookiein = cookieout;
3845 cookieinlen = cookieoutlen;
3848 if (flags & VIR_MIGRATE_TUNNELLED) {
3849 if (!(st = virStreamNew(dconn, 0)))
3852 qemuDomainObjEnterRemote(vm);
3854 ret = dconn->driver->domainMigratePrepareTunnel3Params
3855 (dconn, st, params, nparams, cookiein, cookieinlen,
3856 &cookieout, &cookieoutlen, destflags);
3858 ret = dconn->driver->domainMigratePrepareTunnel3
3859 (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
3860 destflags, dname, bandwidth, dom_xml);
3862 qemuDomainObjExitRemote(vm);
3864 qemuDomainObjEnterRemote(vm);
3866 ret = dconn->driver->domainMigratePrepare3Params
3867 (dconn, params, nparams, cookiein, cookieinlen,
3868 &cookieout, &cookieoutlen, &uri_out, destflags);
3870 ret = dconn->driver->domainMigratePrepare3
3871 (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
3872 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
3874 qemuDomainObjExitRemote(vm);
3880 if (flags & VIR_MIGRATE_OFFLINE) {
3881 VIR_DEBUG("Offline migration, skipping Perform phase");
3882 VIR_FREE(cookieout);
3891 virTypedParamsReplaceString(¶ms, &nparams,
3892 VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
3893 orig_err = virSaveLastError();
3896 } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
3897 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
3898 _("domainMigratePrepare3 did not set uri"));
3899 orig_err = virSaveLastError();
3903 /* Perform the migration. The driver isn't supposed to return
3904 * until the migration is complete. The src VM should remain
3905 * running, but in paused state until the destination can
3906 * confirm migration completion.
3908 VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
3909 qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
3911 cookiein = cookieout;
3912 cookieinlen = cookieoutlen;
3915 if (flags & VIR_MIGRATE_TUNNELLED) {
3916 ret = doTunnelMigrate(driver, vm, st,
3917 cookiein, cookieinlen,
3918 &cookieout, &cookieoutlen,
3919 flags, bandwidth, dconn, graphicsuri);
3921 ret = doNativeMigrate(driver, vm, uri,
3922 cookiein, cookieinlen,
3923 &cookieout, &cookieoutlen,
3924 flags, bandwidth, dconn, graphicsuri);
3927 /* Perform failed. Make sure Finish doesn't overwrite the error */
3929 orig_err = virSaveLastError();
3931 qemuMigrationJobSetPhase(driver, vm,
3932 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
3935 /* If Perform returns < 0, then we need to cancel the VM
3936 * startup on the destination
3938 cancelled = ret < 0;
3942 * The status code from the source is passed to the destination.
3943 * The dest can cleanup in the source indicated it failed to
3944 * send all migration data. Returns NULL for ddomain if
3945 * the dest was unable to complete migration.
3947 VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
3949 cookiein = cookieout;
3950 cookieinlen = cookieoutlen;
3955 if (virTypedParamsGetString(params, nparams,
3956 VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
3957 virTypedParamsReplaceString(¶ms, &nparams,
3958 VIR_MIGRATE_PARAM_DEST_NAME,
3959 vm->def->name) < 0) {
3962 qemuDomainObjEnterRemote(vm);
3963 ddomain = dconn->driver->domainMigrateFinish3Params
3964 (dconn, params, nparams, cookiein, cookieinlen,
3965 &cookieout, &cookieoutlen, destflags, cancelled);
3966 qemuDomainObjExitRemote(vm);
3969 dname = dname ? dname : vm->def->name;
3970 qemuDomainObjEnterRemote(vm);
3971 ddomain = dconn->driver->domainMigrateFinish3
3972 (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
3973 dconnuri, uri, destflags, cancelled);
3974 qemuDomainObjExitRemote(vm);
3976 if (cancelled && ddomain)
3977 VIR_ERROR(_("finish step ignored that migration was cancelled"));
3979 /* If ddomain is NULL, then we were unable to start
3980 * the guest on the target, and must restart on the
3981 * source. There is a small chance that the ddomain
3982 * is NULL due to an RPC failure, in which case
3983 * ddomain could in fact be running on the dest.
3984 * The lock manager plugins should take care of
3985 * safety in this scenario.
3987 cancelled = ddomain == NULL;
3989 /* If finish3 set an error, and we don't have an earlier
3990 * one we need to preserve it in case confirm3 overwrites
3993 orig_err = virSaveLastError();
3996 * If cancelled, then src VM will be restarted, else
3999 VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
4001 cookiein = cookieout;
4002 cookieinlen = cookieoutlen;
4005 ret = qemuMigrationConfirmPhase(driver, sconn, vm,
4006 cookiein, cookieinlen,
4008 /* If Confirm3 returns -1, there's nothing more we can
4009 * do, but fortunately worst case is that there is a
4010 * domain left in 'paused' state on source.
4013 VIR_WARN("Guest %s probably left in 'paused' state on source",
4018 virObjectUnref(ddomain);
4027 virSetError(orig_err);
4028 virFreeError(orig_err);
4032 VIR_FREE(cookieout);
4033 virTypedParamsFree(params, nparams);
4038 static int virConnectCredType[] = {
4040 VIR_CRED_PASSPHRASE,
4044 static virConnectAuth virConnectAuthConfig = {
4045 .credtype = virConnectCredType,
4046 .ncredtype = ARRAY_CARDINALITY(virConnectCredType),
4050 static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
4051 virConnectPtr sconn,
4054 const char *dconnuri,
4056 const char *graphicsuri,
4057 const char *listenAddress,
4058 unsigned long flags,
4060 unsigned long resource,
4064 virConnectPtr dconn = NULL;
4066 virErrorPtr orig_err = NULL;
4067 bool offline = false;
4068 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
4071 VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
4072 "uri=%s, graphicsuri=%s, listenAddress=%s, flags=%lx, "
4073 "dname=%s, resource=%lu",
4074 driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
4075 NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
4076 flags, NULLSTR(dname), resource);
4078 /* the order of operations is important here; we make sure the
4079 * destination side is completely setup before we touch the source
4082 qemuDomainObjEnterRemote(vm);
4083 dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
4084 qemuDomainObjExitRemote(vm);
4085 if (dconn == NULL) {
4086 virReportError(VIR_ERR_OPERATION_FAILED,
4087 _("Failed to connect to remote libvirt URI %s: %s"),
4088 dconnuri, virGetLastErrorMessage());
4089 virObjectUnref(cfg);
4093 if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
4094 cfg->keepAliveCount) < 0)
4097 qemuDomainObjEnterRemote(vm);
4098 p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
4099 VIR_DRV_FEATURE_MIGRATION_P2P);
4100 /* v3proto reflects whether the caller used Perform3, but with
4101 * p2p migrate, regardless of whether Perform2 or Perform3
4102 * were used, we decide protocol based on what target supports
4104 *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
4105 VIR_DRV_FEATURE_MIGRATION_V3);
4106 useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
4107 VIR_DRV_FEATURE_MIGRATION_PARAMS);
4108 if (flags & VIR_MIGRATE_OFFLINE)
4109 offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
4110 VIR_DRV_FEATURE_MIGRATION_OFFLINE);
4111 qemuDomainObjExitRemote(vm);
4114 virReportError(VIR_ERR_OPERATION_FAILED, "%s",
4115 _("Destination libvirt does not support peer-to-peer migration protocol"));
4119 /* Only xmlin, dname, uri, and bandwidth parameters can be used with
4120 * old-style APIs. */
4121 if (!useParams && graphicsuri) {
4122 virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
4123 _("Migration APIs with extensible parameters are not "
4124 "supported but extended parameters were passed"));
4128 if (flags & VIR_MIGRATE_OFFLINE && !offline) {
4129 virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
4130 _("offline migration is not supported by "
4131 "the destination host"));
4135 /* domain may have been stopped while we were talking to remote daemon */
4136 if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
4137 virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
4138 _("guest unexpectedly quit"));
4142 /* Change protection is only required on the source side (us), and
4143 * only for v3 migration when begin and perform are separate jobs.
4144 * But peer-2-peer is already a single job, and we still want to
4145 * talk to older destinations that would reject the flag.
4146 * Therefore it is safe to clear the bit here. */
4147 flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
4150 ret = doPeer2PeerMigrate3(driver, sconn, dconn, dconnuri, vm, xmlin,
4151 dname, uri, graphicsuri, listenAddress,
4152 resource, useParams, flags);
4154 ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
4155 dconnuri, flags, dname, resource);
4159 orig_err = virSaveLastError();
4160 qemuDomainObjEnterRemote(vm);
4161 virObjectUnref(dconn);
4162 qemuDomainObjExitRemote(vm);
4164 virSetError(orig_err);
4165 virFreeError(orig_err);
4167 virObjectUnref(cfg);
4173 * This implements perform part of the migration protocol when migration job
4174 * does not need to be active across several APIs, i.e., peer2peer migration or
4175 * perform phase of v2 non-peer2peer migration.
/* qemuMigrationPerformJob:
 * Runs the whole "perform" step as one async job: used for peer-to-peer
 * migration (tunnelled or not) and for the perform phase of the old v2
 * protocol.  Starts QEMU_ASYNC_JOB_MIGRATION_OUT, dispatches to
 * doPeer2PeerMigrate() or doNativeMigrate(), then on success stops the
 * source qemu (v2 path) or on failure tries to resume the source CPUs.
 * NOTE(review): this excerpt elides several original lines (return type,
 * some parameters, braces, labels); comments describe only visible code. */
4178 qemuMigrationPerformJob(virQEMUDriverPtr driver,
4182                         const char *dconnuri,
4184                         const char *graphicsuri,
4185                         const char *listenAddress,
4186                         const char *cookiein,
4190                         unsigned long flags,
4192                         unsigned long resource,
4195     virObjectEventPtr event = NULL;
4197     virErrorPtr orig_err = NULL;
4198     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
4199     bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
     /* Everything below runs under the migration-out async job. */
4201     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
     /* A live (non-offline) migration needs a running domain. */
4204     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
4205         virReportError(VIR_ERR_OPERATION_INVALID,
4206                        "%s", _("domain is not running"));
4210     if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
     /* Refuse unsafe storage configurations unless the caller opted in. */
4213     if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
     /* Remember running/paused state so it can be restored on failure. */
4216     qemuMigrationStoreDomainState(vm);
4218     if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
4219         ret = doPeer2PeerMigrate(driver, conn, vm, xmlin,
4220                                  dconnuri, uri, graphicsuri, listenAddress,
4221                                  flags, dname, resource, &v3proto);
4223         qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
4224         ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
4225                               cookieout, cookieoutlen,
4226                               flags, resource, NULL, NULL);
4232      * In v3 protocol, the source VM is not killed off until the
     /* v2 path: migration succeeded, so tear down the source qemu now
      * and emit the STOPPED/MIGRATED lifecycle event. */
4236     qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
4237                     VIR_QEMU_PROCESS_STOP_MIGRATED);
4238     virDomainAuditStop(vm, "migrated");
4239     event = virDomainEventLifecycleNewFromObj(vm,
4240                                      VIR_DOMAIN_EVENT_STOPPED,
4241                                      VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
     /* Preserve the migration error before resume may overwrite it. */
4246     orig_err = virSaveLastError();
4248     if (qemuMigrationRestoreDomainState(conn, vm)) {
4249         event = virDomainEventLifecycleNewFromObj(vm,
4250                                          VIR_DOMAIN_EVENT_RESUMED,
4251                                          VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
     /* End the async job; if the domain object lost its last reference,
      * or it is inactive and should be undefined, clean it up. */
4254     if (!qemuMigrationJobFinish(driver, vm)) {
4256     } else if (!virDomainObjIsActive(vm) &&
4258                 (ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
4259         if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
4260             virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
4261         qemuDomainRemoveInactive(driver, vm);
     /* Re-install the saved error (if any) as the thread-local error. */
4266         virSetError(orig_err);
4267         virFreeError(orig_err);
4272         virObjectUnlock(vm);
4274         qemuDomainEventQueue(driver, event);
4275     virObjectUnref(cfg);
4280 * This implements perform phase of v3 migration protocol.
/* qemuMigrationPerformPhase:
 * Perform phase of the v3 migration protocol, when begin/perform/confirm
 * are separate API calls.  Resumes (or starts) the migration-out async
 * job, runs the native migration, and on failure tries to resume the
 * source domain.  NOTE(review): this excerpt elides some original lines
 * (return type, some parameters, braces, labels); comments describe only
 * what is visible. */
4283 qemuMigrationPerformPhase(virQEMUDriverPtr driver,
4287                           const char *graphicsuri,
4288                           const char *cookiein,
4292                           unsigned long flags,
4293                           unsigned long resource)
4295     virObjectEventPtr event = NULL;
4299     /* If we didn't start the job in the begin phase, start it now. */
4300     if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
4301         if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
4303     } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
4307     qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
     /* Drop the close callback registered earlier; it is re-registered
      * below once this phase completes successfully. */
4308     virCloseCallbacksUnset(driver->closeCallbacks, vm,
4309                            qemuMigrationCleanup);
4311     ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
4312                           cookieout, cookieoutlen,
4313                           flags, resource, NULL, graphicsuri);
     /* Failure path: try to resume the source CPUs and emit RESUMED. */
4316         if (qemuMigrationRestoreDomainState(conn, vm)) {
4317             event = virDomainEventLifecycleNewFromObj(vm,
4318                                              VIR_DOMAIN_EVENT_RESUMED,
4319                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
4324     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);
4326     if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
4327                              qemuMigrationCleanup) < 0)
     /* On failure finish the job outright; on success keep it alive for
      * the upcoming confirm phase (job continues across API calls). */
4332         hasrefs = qemuMigrationJobFinish(driver, vm);
4334         hasrefs = qemuMigrationJobContinue(vm);
4337     } else if (!virDomainObjIsActive(vm) && !vm->persistent) {
4338         qemuDomainRemoveInactive(driver, vm);
4344         virObjectUnlock(vm);
4346         qemuDomainEventQueue(driver, event);
/* qemuMigrationPerform:
 * Top-level dispatcher for the perform step.  Peer-to-peer/tunnelled
 * migrations go through qemuMigrationPerformJob(); otherwise v3 callers
 * get qemuMigrationPerformPhase() and v2 callers get
 * qemuMigrationPerformJob().  NOTE(review): several original lines
 * (return type, some parameters, braces) are elided from this excerpt. */
4351 qemuMigrationPerform(virQEMUDriverPtr driver,
4355                      const char *dconnuri,
4357                      const char *graphicsuri,
4358                      const char *listenAddress,
4359                      const char *cookiein,
4363                      unsigned long flags,
4365                      unsigned long resource,
     /* NOTE(review): the format string below is missing a ", " separator
      * after "listenAddress=%s", so the debug output runs listenAddress
      * and cookiein together — worth fixing when the full file is edited. */
4368     VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
4369               "uri=%s, graphicsuri=%s, listenAddress=%s"
4370               "cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
4371               "flags=%lx, dname=%s, resource=%lu, v3proto=%d",
4372               driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
4373               NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
4374               NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
4375               flags, NULLSTR(dname), resource, v3proto);
     /* Peer-to-peer: cookies are exchanged internally, so a caller-
      * supplied cookie is an error here. */
4377     if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
4379             virReportError(VIR_ERR_OPERATION_INVALID,
4380                            "%s", _("received unexpected cookie with P2P migration"));
4384         return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, uri,
4385                                        graphicsuri, listenAddress,
4386                                        cookiein, cookieinlen,
4387                                        cookieout, cookieoutlen,
4388                                        flags, dname, resource, v3proto);
     /* Non-p2p migration must not carry a dconnuri. */
4391             virReportError(VIR_ERR_INTERNAL_ERROR,
4392                            "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
     /* v3: perform is its own phase within a job begun earlier. */
4397             return qemuMigrationPerformPhase(driver, conn, vm, uri,
4399                                              cookiein, cookieinlen,
4400                                              cookieout, cookieoutlen,
     /* v2: run the whole perform step as a single job. */
4403             return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri,
4404                                            uri, graphicsuri, listenAddress,
4405                                            cookiein, cookieinlen,
4406                                            cookieout, cookieoutlen, flags,
4407                                            dname, resource, v3proto);
/* qemuMigrationVPAssociatePortProfiles:
 * On the destination, (re-)associate 802.1Qbg/1Qbh port profiles for every
 * DIRECT (macvtap) interface of @def after an incoming migration, and
 * register the macvlan callback for each.  On failure, roll back by
 * disassociating the profiles of all interfaces that had already been
 * associated (indices below last_good_net).  NOTE(review): some original
 * lines (return type, a few arguments, braces, error labels) are elided
 * from this excerpt. */
4413 qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
4416     int last_good_net = -1;
4417     virDomainNetDefPtr net;
4419     for (i = 0; i < def->nnets; i++) {
4421         if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
4422             if (virNetDevVPortProfileAssociate(net->ifname,
4423                                                virDomainNetGetActualVirtPortProfile(net),
4425                                                virDomainNetGetActualDirectDev(net),
4428                                                VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
4430                 virReportError(VIR_ERR_OPERATION_FAILED,
4431                                _("Port profile Associate failed for %s"),
4435             VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);
4437             if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
4438                                                              virDomainNetGetActualDirectDev(net), def->uuid,
4439                                                              virDomainNetGetActualVirtPortProfile(net),
4440                                                              VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
     /* Error rollback: undo associations for nets processed so far;
      * disassociate failures are deliberately ignored (best effort). */
4449     for (i = 0; last_good_net != -1 && i < last_good_net; i++) {
4451         if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
4452             ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
4453                                                            virDomainNetGetActualVirtPortProfile(net),
4455                                                            virDomainNetGetActualDirectDev(net),
4457                                                            VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
/* qemuMigrationFinish:
 * Destination-side finish step (v2 FINISH2 / v3 FINISH3).  If @retcode
 * reports success: associate port profiles, stop the NBD server, persist
 * the config when VIR_MIGRATE_PERSIST_DEST is set, resume CPUs unless
 * VIR_MIGRATE_PAUSED, and return a virDomainPtr for the now-running
 * domain.  On any failure the incoming qemu process is torn down.
 * NOTE(review): this excerpt elides many original lines (return type,
 * some parameters, braces, goto labels); comments describe only what is
 * visible. */
4465 qemuMigrationFinish(virQEMUDriverPtr driver,
4466                     virConnectPtr dconn,
4468                     const char *cookiein,
4472                     unsigned long flags,
4476     virDomainPtr dom = NULL;
4477     virObjectEventPtr event = NULL;
4479     qemuMigrationCookiePtr mig = NULL;
4480     virErrorPtr orig_err = NULL;
4481     int cookie_flags = 0;
4482     qemuDomainObjPrivatePtr priv = vm->privateData;
4483     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
4484     virCapsPtr caps = NULL;
4485     unsigned short port;
4487     VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
4488               "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
4489               driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
4490               cookieout, cookieoutlen, flags, retcode);
     /* Take ownership of the migration port so it is released on exit. */
4492     port = priv->migrationPort;
4493     priv->migrationPort = 0;
4495     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
4498     if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
4501     qemuMigrationJobStartPhase(driver, vm,
4502                                v3proto ? QEMU_MIGRATION_PHASE_FINISH3
4503                                        : QEMU_MIGRATION_PHASE_FINISH2);
4505     qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
4507     cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK;
4508     if (flags & VIR_MIGRATE_PERSIST_DEST)
4509         cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
4511     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein,
4512                                        cookieinlen, cookie_flags)))
4515     /* Did the migration go as planned? If yes, return the domain
4516      * object, but if no, clean up the empty qemu process.
4519         if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
4520             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
4521                            _("guest unexpectedly quit"));
4525         if (!(flags & VIR_MIGRATE_OFFLINE)) {
4526             if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
4527                 qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
4528                                 VIR_QEMU_PROCESS_STOP_MIGRATED);
4529                 virDomainAuditStop(vm, "failed");
4530                 event = virDomainEventLifecycleNewFromObj(vm,
4531                                                  VIR_DOMAIN_EVENT_STOPPED,
4532                                                  VIR_DOMAIN_EVENT_STOPPED_FAILED);
     /* Relocation of network data is best effort — warn, don't fail. */
4536             if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
4537                 VIR_WARN("unable to provide network data for relocation");
     /* NBD disk-mirroring server is no longer needed once finished. */
4540         qemuMigrationStopNBDServer(driver, vm, mig);
4542         if (flags & VIR_MIGRATE_PERSIST_DEST) {
4543             virDomainDefPtr vmdef;
     /* Prefer the persistent definition carried in the cookie, else
      * derive one from the live domain. */
4547             if (mig->persistent)
4548                 vm->newDef = vmdef = mig->persistent;
4550                 vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
4551             if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
4552                 /* Hmpf. Migration was successful, but making it persistent
4553                  * was not. If we report successful, then when this domain
4554                  * shuts down, management tools are in for a surprise. On the
4555                  * other hand, if we report failure, then the management tools
4556                  * might try to restart the domain on the source side, even
4557                  * though the domain is actually running on the destination.
4558                  * Return a NULL dom pointer, and hope that this is a rare
4559                  * situation and management tools are smart.
4563                  * However, in v3 protocol, the source VM is still available
4564                  * to restart during confirm() step, so we kill it off now.
4567                     if (!(flags & VIR_MIGRATE_OFFLINE)) {
4568                         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
4569                                         VIR_QEMU_PROCESS_STOP_MIGRATED);
4570                         virDomainAuditStop(vm, "failed");
4576                         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
4577                                        _("can't get vmdef"));
4581             event = virDomainEventLifecycleNewFromObj(vm,
4582                                              VIR_DOMAIN_EVENT_DEFINED,
4584                                              VIR_DOMAIN_EVENT_DEFINED_ADDED :
4585                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED);
4587                 qemuDomainEventQueue(driver, event);
4591         if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
4592             /* run 'cont' on the destination, which allows migration on qemu
4593              * >= 0.10.6 to work properly. This isn't strictly necessary on
4594              * older qemu's, but it also doesn't hurt anything there
4596             if (qemuProcessStartCPUs(driver, vm, dconn,
4597                                      VIR_DOMAIN_RUNNING_MIGRATED,
4598                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
4599                 if (virGetLastError() == NULL)
4600                     virReportError(VIR_ERR_INTERNAL_ERROR,
4601                                    "%s", _("resume operation failed"));
4602                 /* Need to save the current error, in case shutting
4603                  * down the process overwrites it
4605                 orig_err = virSaveLastError();
4608                  * In v3 protocol, the source VM is still available to
4609                  * restart during confirm() step, so we kill it off
4611                  * In v2 protocol, the source is dead, so we leave
4612                  * target in paused state, in case admin can fix
4616                     qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
4617                                     VIR_QEMU_PROCESS_STOP_MIGRATED);
4618                     virDomainAuditStop(vm, "failed");
4619                     event = virDomainEventLifecycleNewFromObj(vm,
4620                                                      VIR_DOMAIN_EVENT_STOPPED,
4621                                                      VIR_DOMAIN_EVENT_STOPPED_FAILED);
     /* A non-NULL dom is the success indicator returned to the caller. */
4627         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
4629         if (!(flags & VIR_MIGRATE_OFFLINE)) {
4630             event = virDomainEventLifecycleNewFromObj(vm,
4631                                              VIR_DOMAIN_EVENT_RESUMED,
4632                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
     /* Still paused here means VIR_MIGRATE_PAUSED was requested (or
      * resume was skipped): mark paused-by-user and emit SUSPENDED too. */
4633             if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
4634                 virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
4635                                      VIR_DOMAIN_PAUSED_USER);
4637                     qemuDomainEventQueue(driver, event);
4638                 event = virDomainEventLifecycleNewFromObj(vm,
4639                                                  VIR_DOMAIN_EVENT_SUSPENDED,
4640                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
     /* Failing to save the status XML is not fatal — just warn. */
4644         if (virDomainObjIsActive(vm) &&
4645             virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
4646             VIR_WARN("Failed to save status on vm %s", vm->def->name);
4650         /* Guest is successfully running, so cancel previous auto destroy */
4651         qemuProcessAutoDestroyRemove(driver, vm);
4652     } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
     /* retcode reported failure: kill the incoming qemu process. */
4653         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
4654                         VIR_QEMU_PROCESS_STOP_MIGRATED);
4655         virDomainAuditStop(vm, "failed");
4656         event = virDomainEventLifecycleNewFromObj(vm,
4657                                          VIR_DOMAIN_EVENT_STOPPED,
4658                                          VIR_DOMAIN_EVENT_STOPPED_FAILED);
4661     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
4662         VIR_WARN("Unable to encode migration cookie");
4665     if (qemuMigrationJobFinish(driver, vm) == 0) {
4667     } else if (!vm->persistent && !virDomainObjIsActive(vm)) {
4668         qemuDomainRemoveInactive(driver, vm);
     /* Cleanup: release the migration port taken at entry, free the
      * saved origname, and drop locks/refs. */
4673     virPortAllocatorRelease(driver->migrationPorts, port);
4675         VIR_FREE(priv->origname);
4676         virObjectUnlock(vm);
4679         qemuDomainEventQueue(driver, event);
4680     qemuMigrationCookieFree(mig);
4682         virSetError(orig_err);
4683         virFreeError(orig_err);
4685     virObjectUnref(caps);
4686     virObjectUnref(cfg);
4691 /* Helper function called while vm is active. */
/* qemuMigrationToFile:
 * Stream the running domain's state to @fd at @offset of @path (used by
 * save/dump).  Prefers fd-based migration (QEMU_CAPS_MIGRATE_QEMU_FD),
 * optionally piping through @compressor; otherwise falls back to exec
 * ("migrate_to_file") which needs cgroup device ACLs and state labels.
 * Temporarily raises migration bandwidth to the maximum and restores it
 * before returning.  NOTE(review): this excerpt elides many original
 * lines (return type, braces, gotos, some arguments); comments describe
 * only what is visible. */
4693 qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
4694                     int fd, off_t offset, const char *path,
4695                     const char *compressor,
4696                     bool bypassSecurityDriver,
4697                     enum qemuDomainAsyncJob asyncJob)
4699     qemuDomainObjPrivatePtr priv = vm->privateData;
4702     bool restoreLabel = false;
4703     virCommandPtr cmd = NULL;
4704     int pipeFD[2] = { -1, -1 };
4705     unsigned long saveMigBandwidth = priv->migMaxBandwidth;
4706     char *errbuf = NULL;
4707     virErrorPtr orig_err = NULL;
4709     /* Increase migration bandwidth to unlimited since target is a file.
4710      * Failure to change migration speed is not fatal. */
4711     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
4712         qemuMonitorSetMigrationSpeed(priv->mon,
4713                                      QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
4714         priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
4715         qemuDomainObjExitMonitor(driver, vm);
     /* pipeFD is only created when a compressor sits between qemu and fd. */
4718     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
4719         (!compressor || pipe(pipeFD) == 0)) {
4720         /* All right! We can use fd migration, which means that qemu
4721          * doesn't have to open() the file, so while we still have to
4722          * grant SELinux access, we can do it on fd and avoid cleanup
4723          * later, as well as skip futzing with cgroup. */
4724         if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
4725                                               compressor ? pipeFD[1] : fd) < 0)
4727         bypassSecurityDriver = true;
4729         /* Phooey - we have to fall back on exec migration, where qemu
4730          * has to popen() the file by name, and block devices have to be
4731          * given cgroup ACL permission. We might also stumble on
4732          * a race present in some qemu versions where it does a wait()
4733          * that botches pclose. */
4734         if (virCgroupHasController(priv->cgroup,
4735                                    VIR_CGROUP_CONTROLLER_DEVICES)) {
4736             int rv = virCgroupAllowDevicePath(priv->cgroup, path,
4737                                               VIR_CGROUP_DEVICE_RW);
4738             virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path, "rw", rv == 0);
4740                 /* path was not a device, no further need for cgroup */
4741             } else if (rv < 0) {
4745         if ((!bypassSecurityDriver) &&
4746             virSecurityManagerSetSavedStateLabel(driver->securityManager,
4749             restoreLabel = true;
4752     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
     /* No compressor: migrate straight to the fd, or exec "cat" as the
      * fallback file writer. */
4756         const char *args[] = { "cat", NULL };
4758         if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
4759             priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX) {
4760             rc = qemuMonitorMigrateToFd(priv->mon,
4761                                         QEMU_MONITOR_MIGRATE_BACKGROUND,
4764             rc = qemuMonitorMigrateToFile(priv->mon,
4765                                           QEMU_MONITOR_MIGRATE_BACKGROUND,
4766                                           args, path, offset);
     /* Compressor requested: run it as an async child, qemu writes into
      * pipeFD[1], the compressor reads pipeFD[0] and writes to fd. */
4769         const char *prog = compressor;
4770         const char *args[] = {
4775         if (pipeFD[0] != -1) {
4776             cmd = virCommandNewArgs(args);
4777             virCommandSetInputFD(cmd, pipeFD[0]);
4778             virCommandSetOutputFD(cmd, &fd);
4779             virCommandSetErrorBuffer(cmd, &errbuf);
4780             virCommandDoAsyncIO(cmd);
4781             if (virSetCloseExec(pipeFD[1]) < 0) {
4782                 virReportSystemError(errno, "%s",
4783                                      _("Unable to set cloexec flag"));
4784                 qemuDomainObjExitMonitor(driver, vm);
4787             if (virCommandRunAsync(cmd, NULL) < 0) {
4788                 qemuDomainObjExitMonitor(driver, vm);
4791             rc = qemuMonitorMigrateToFd(priv->mon,
4792                                         QEMU_MONITOR_MIGRATE_BACKGROUND,
     /* Parent no longer needs either pipe end once qemu holds the fd. */
4794             if (VIR_CLOSE(pipeFD[0]) < 0 ||
4795                 VIR_CLOSE(pipeFD[1]) < 0)
4796                 VIR_WARN("failed to close intermediate pipe");
4798             rc = qemuMonitorMigrateToFile(priv->mon,
4799                                           QEMU_MONITOR_MIGRATE_BACKGROUND,
4800                                           args, path, offset);
4803     qemuDomainObjExitMonitor(driver, vm);
4808     rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob, NULL, false);
     /* Failure after starting: preserve the error, abort the compressor,
      * and cancel the in-flight migrate command. */
4812         orig_err = virSaveLastError();
4813         virCommandAbort(cmd);
4814         if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
4815             qemuMonitorMigrateCancel(priv->mon);
4816             qemuDomainObjExitMonitor(driver, vm);
4822     if (cmd && virCommandWait(cmd, NULL) < 0)
     /* Keep the first error; don't let cleanup below overwrite it. */
4828     if (ret < 0 && !orig_err)
4829         orig_err = virSaveLastError();
4831     /* Restore max migration bandwidth */
4832     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
4833         qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
4834         priv->migMaxBandwidth = saveMigBandwidth;
4835         qemuDomainObjExitMonitor(driver, vm);
4838     VIR_FORCE_CLOSE(pipeFD[0]);
4839     VIR_FORCE_CLOSE(pipeFD[1]);
4841         VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
4843     virCommandFree(cmd);
     /* Undo the exec-path side effects: restore the saved-state label
      * and revoke the cgroup device ACL granted above. */
4845     if (restoreLabel && (!bypassSecurityDriver) &&
4846         virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
4848         VIR_WARN("failed to restore save state label on %s", path);
4850     if (virCgroupHasController(priv->cgroup,
4851                                VIR_CGROUP_CONTROLLER_DEVICES)) {
4852         int rv = virCgroupDenyDevicePath(priv->cgroup, path,
4853                                          VIR_CGROUP_DEVICE_RWM);
4854         virDomainAuditCgroupPath(vm, priv->cgroup, "deny", path, "rwm", rv == 0);
4858         virSetError(orig_err);
4859         virFreeError(orig_err);
/* qemuMigrationJobStart:
 * Begin the async migration job @job on @vm.  Incoming migration allows
 * no concurrent jobs at all; outgoing migration additionally permits
 * suspend and migration-op jobs alongside the defaults.  Marks the job
 * info as UNBOUNDED (no predictable completion time).  NOTE(review):
 * return type, braces and the return statements are elided from this
 * excerpt. */
4866 qemuMigrationJobStart(virQEMUDriverPtr driver,
4868                       enum qemuDomainAsyncJob job)
4870     qemuDomainObjPrivatePtr priv = vm->privateData;
4872     if (qemuDomainObjBeginAsyncJob(driver, vm, job) < 0)
4875     if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
4876         qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
4878         qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
4879                                      JOB_MASK(QEMU_JOB_SUSPEND) |
4880                                      JOB_MASK(QEMU_JOB_MIGRATION_OP));
4883     priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
/* qemuMigrationJobSetPhase:
 * Advance the current migration job to @phase.  Phases are ordered;
 * moving backwards indicates a protocol violation, which is logged and
 * refused (the phase is only set when it does not regress). */
4889 qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
4891                          enum qemuMigrationJobPhase phase)
4893     qemuDomainObjPrivatePtr priv = vm->privateData;
4895     if (phase < priv->job.phase) {
4896         VIR_ERROR(_("migration protocol going backwards %s => %s"),
4897                   qemuMigrationJobPhaseTypeToString(priv->job.phase),
4898                   qemuMigrationJobPhaseTypeToString(phase));
4902     qemuDomainObjSetJobPhase(driver, vm, phase);
/* qemuMigrationJobStartPhase:
 * Resume a migration job that spans multiple API calls and enter @phase.
 * Delegates the phase bookkeeping to qemuMigrationJobSetPhase().
 * NOTE(review): this excerpt elides lines between the signature and the
 * call (likely a reference-taking step) — confirm against the full file. */
4906 qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
4908                            enum qemuMigrationJobPhase phase)
4911     qemuMigrationJobSetPhase(driver, vm, phase);
/* qemuMigrationJobContinue:
 * Release the async job condition so another API call can pick the job
 * up later (the job itself stays active across calls), then drop one
 * reference on @vm; the virObjectUnref() result is propagated to the
 * caller. */
4915 qemuMigrationJobContinue(virDomainObjPtr vm)
4917     qemuDomainObjReleaseAsyncJob(vm);
4918     return virObjectUnref(vm);
/* qemuMigrationJobIsActive:
 * Check whether @vm's current async job is @job; if not, report an
 * OPERATION_INVALID error with a direction-specific message.
 * Note: msg is passed as the format string — safe here because both
 * translatable messages contain exactly one %s consumed by
 * vm->def->name.  NOTE(review): the success/failure return statements
 * are elided from this excerpt. */
4922 qemuMigrationJobIsActive(virDomainObjPtr vm,
4923                          enum qemuDomainAsyncJob job)
4925     qemuDomainObjPrivatePtr priv = vm->privateData;
4927     if (priv->job.asyncJob != job) {
4930         if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
4931             msg = _("domain '%s' is not processing incoming migration");
4933             msg = _("domain '%s' is not being migrated");
4935         virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
4942 qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
4944 return qemuDomainObjEndAsyncJob(driver, vm);