Discussion:
[libvirt] [PATCH RFC 1/3] qemu: Add check for whether nesting was enabled
John Ferlan
2018-11-13 20:21:04 UTC
Support for nested kvm is enabled via a kernel module configuration
adjustment. If that adjustment is made after libvirtd has started and/or
after the QEMU capabilities were last probed, the result can be an
inability to start a guest that uses nested kvm until the capabilities
cache is invalidated.

Thus, let's fetch and save the setting during initialization and then,
when the capabilities are checked for the various host related
adjustments that could affect whether the capabilities cache is updated,
add a check of whether the nested value was changed for either kvm_intel
or kvm_amd and force a refetch of the capabilities if so.
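
The check added below scans the modprobe configuration string returned
by virKModConfig(); purely for illustration, an equivalent standalone
probe against sysfs might look like the following (the helper name and
the sysfs approach are my own sketch, not what this patch does):

    /* Illustration only (not part of this patch): read the nested module
     * parameter from sysfs; depending on the kernel the value is reported
     * as 'Y'/'N' or '1'/'0'. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool
    nestedParamEnabled(const char *module)  /* e.g. "kvm_intel", "kvm_amd" */
    {
        char path[256];
        char val = '\0';
        FILE *fp;

        snprintf(path, sizeof(path),
                 "/sys/module/%s/parameters/nested", module);
        if (!(fp = fopen(path, "r")))
            return false;   /* module not loaded or parameter not present */
        if (fread(&val, 1, 1, fp) != 1)
            val = '\0';
        fclose(fp);
        return val == 'Y' || val == '1';
    }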

Signed-off-by: John Ferlan <***@redhat.com>
---
src/qemu/qemu_capabilities.c | 43 ++++++++++++++++++++++++++++++++++++
src/qemu/qemu_capspriv.h     |  2 ++
tests/qemucapabilitiestest.c |  3 +++
3 files changed, 48 insertions(+)

diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 2ca5af3297..ebe0c0c7df 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -40,6 +40,7 @@
#include "virnodesuspend.h"
#include "virnuma.h"
#include "virhostcpu.h"
+#include "virkmod.h"
#include "qemu_monitor.h"
#include "virstring.h"
#include "qemu_hostdev.h"
@@ -551,6 +552,7 @@ struct _virQEMUCaps {
virObject parent;

bool usedQMP;
+ bool isNested;

char *binary;
time_t ctime;
@@ -1512,6 +1514,7 @@ virQEMUCapsPtr virQEMUCapsNewCopy(virQEMUCapsPtr qemuCaps)
return NULL;

ret->usedQMP = qemuCaps->usedQMP;
+ ret->isNested = qemuCaps->isNested;

if (VIR_STRDUP(ret->binary, qemuCaps->binary) < 0)
goto error;
@@ -3567,6 +3570,9 @@ virQEMUCapsLoadCache(virArch hostArch,
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_KVM);
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_QEMU);

+ qemuCaps->isNested = virXPathBoolean("count(./isNested) > 0",
+ ctxt) > 0;
+
ret = 0;
cleanup:
VIR_FREE(str);
@@ -3786,6 +3792,9 @@ virQEMUCapsFormatCache(virQEMUCapsPtr qemuCaps)
if (qemuCaps->sevCapabilities)
virQEMUCapsFormatSEVInfo(qemuCaps, &buf);

+ if (qemuCaps->isNested)
+ virBufferAddLit(&buf, "<isNested/>\n");
+
virBufferAdjustIndent(&buf, -2);
virBufferAddLit(&buf, "</qemuCaps>\n");

@@ -3826,6 +3835,28 @@ virQEMUCapsSaveFile(void *data,
}


+static bool
+virQEMUCapsIsNested(void)
+{
+ VIR_AUTOFREE(char *) kConfig = NULL;
+
+ if ((kConfig = virKModConfig()) &&
+ (strstr(kConfig, "kvm_intel nested=1") ||
+ strstr(kConfig, "kvm_amd nested=1")))
+ return true;
+ return false;
+}
+
+
+void
+virQEMUCapsClearIsNested(virQEMUCapsPtr qemuCaps)
+{
+ /* For qemucapabilitiestest to avoid printing the <isNested/> on
+ * hosts with nested set in the kernel */
+ qemuCaps->isNested = false;
+}
+
+
static bool
virQEMUCapsIsValid(void *data,
void *privData)
@@ -3834,6 +3865,7 @@ virQEMUCapsIsValid(void *data,
virQEMUCapsCachePrivPtr priv = privData;
bool kvmUsable;
struct stat sb;
+ bool isNested;

if (!qemuCaps->binary)
return true;
@@ -3866,6 +3898,15 @@ virQEMUCapsIsValid(void *data,
return false;
}

+ /* Check if someone changed the nested={0|1} value for the kernel from
+ * the previous time we checked. If so, then refresh the capabilities. */
+ isNested = virQEMUCapsIsNested();
+ if (isNested != qemuCaps->isNested) {
+ VIR_WARN("changed kernel nested kvm value was %d", qemuCaps->isNested);
+ qemuCaps->isNested = isNested;
+ return false;
+ }
+
if (!virQEMUCapsGuestIsNative(priv->hostArch, qemuCaps->arch)) {
VIR_DEBUG("Guest arch (%s) is not native to host arch (%s), "
"skipping KVM-related checks",
@@ -4452,6 +4493,8 @@ virQEMUCapsInitQMP(virQEMUCapsPtr qemuCaps,
if (virQEMUCapsInitQMPMonitor(qemuCaps, cmd->mon) < 0)
goto cleanup;

+ qemuCaps->isNested = virQEMUCapsIsNested();
+
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM)) {
virQEMUCapsInitQMPCommandAbort(cmd);
if ((rc = virQEMUCapsInitQMPCommandRun(cmd, true)) != 0) {
diff --git a/src/qemu/qemu_capspriv.h b/src/qemu/qemu_capspriv.h
index 8d1a40fe74..b5d6aae2e5 100644
--- a/src/qemu/qemu_capspriv.h
+++ b/src/qemu/qemu_capspriv.h
@@ -48,6 +48,8 @@ int
virQEMUCapsInitQMPMonitor(virQEMUCapsPtr qemuCaps,
qemuMonitorPtr mon);

+void virQEMUCapsClearIsNested(virQEMUCapsPtr qemuCaps);
+
int
virQEMUCapsInitQMPMonitorTCG(virQEMUCapsPtr qemuCaps,
qemuMonitorPtr mon);
diff --git a/tests/qemucapabilitiestest.c b/tests/qemucapabilitiestest.c
index 8fe5a55e1d..703fb6a125 100644
--- a/tests/qemucapabilitiestest.c
+++ b/tests/qemucapabilitiestest.c
@@ -63,6 +63,9 @@ testQemuCaps(const void *opaque)
qemuMonitorTestGetMonitor(mon)) < 0)
goto cleanup;

+ /* Don't apply what the host has... force clear for testing purposes */
+ virQEMUCapsClearIsNested(capsActual);
+
if (virQEMUCapsGet(capsActual, QEMU_CAPS_KVM)) {
qemuMonitorResetCommandID(qemuMonitorTestGetMonitor(mon));
if (virQEMUCapsInitQMPMonitorTCG(capsActual,
--
2.17.2
John Ferlan
2018-11-13 20:21:05 UTC
Add an API to allow traversing each of the file cache elements
and invoking a virHashIterator callback function for each element.
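
To illustrate the intended usage, a hypothetical caller might look like
the following (the callback and helper below are made-up examples, not
part of this series):

    /* Hypothetical usage example: count the entries currently held in a
     * file cache using the new iterator. */
    static int
    countEntry(void *payload ATTRIBUTE_UNUSED,
               const void *name ATTRIBUTE_UNUSED,
               void *opaque)
    {
        size_t *count = opaque;

        (*count)++;
        return 0;
    }

    static size_t
    cacheCountEntries(virFileCachePtr cache)
    {
        size_t count = 0;

        virFileCacheForEach(cache, countEntry, &count);
        return count;
    }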

Signed-off-by: John Ferlan <***@redhat.com>
---
src/libvirt_private.syms |  1 +
src/util/virfilecache.c  | 24 ++++++++++++++++++++++++
src/util/virfilecache.h  |  5 +++++
3 files changed, 30 insertions(+)

diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 2343a757c1..d59126c83f 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1882,6 +1882,7 @@ virFindFileInPath;


# util/virfilecache.h
+virFileCacheForEach;
virFileCacheGetPriv;
virFileCacheInsertData;
virFileCacheLookup;
diff --git a/src/util/virfilecache.c b/src/util/virfilecache.c
index 15c0d99fd9..cb7580e11a 100644
--- a/src/util/virfilecache.c
+++ b/src/util/virfilecache.c
@@ -349,6 +349,30 @@ virFileCacheLookupByFunc(virFileCachePtr cache,
}


+/**
+ * virFileCacheForEach:
+ * @cache: existing cache object
+ * @iter: an iterator callback invoked for each entry
+ * @opaque: opaque data to pass to the iterator
+ *
+ * For each element in the file cache, invoke the virHashIterator
+ * callback @iter, passing @opaque as its opaque data.
+ */
+int
+virFileCacheForEach(virFileCachePtr cache,
+ virHashIterator iter,
+ void *opaque)
+{
+ int ret;
+
+ virObjectLock(cache);
+ ret = virHashForEach(cache->table, iter, opaque);
+ virObjectUnlock(cache);
+
+ return ret;
+}
+
+
/**
* virFileCacheGetPriv:
* @cache: existing cache object
diff --git a/src/util/virfilecache.h b/src/util/virfilecache.h
index af6a189d7f..44b766153e 100644
--- a/src/util/virfilecache.h
+++ b/src/util/virfilecache.h
@@ -122,6 +122,11 @@ virFileCacheLookupByFunc(virFileCachePtr cache,
virHashSearcher iter,
const void *iterData);

+int
+virFileCacheForEach(virFileCachePtr cache,
+ virHashIterator iter,
+ void *opaque);
+
void *
virFileCacheGetPriv(virFileCachePtr cache);
--
2.17.2
John Ferlan
2018-11-13 20:21:06 UTC
Provide a mechanism via the QEMU driver reload functionality
to invalidate the entire capabilities cache and force a reread
of the capabilities rather than requiring an admin to "know"
they need to remove all the capability cache files and restart
libvirtd. This still requires use of the reload functionality,
but it provides an "internal mechanism" to cause the reread
rather than relying on the "external need" to remove and restart.
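
As a rough sketch of the resulting flow (a self-contained toy model of
my own, not libvirt code): each cached entry carries an 'invalid' flag,
the reload handler sets it on every entry, and the validity check then
rejects the entry so it is re-probed on the next lookup.

    /* Toy model of the invalidate-on-reload pattern (illustration only). */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        const char *binary;
        bool invalid;               /* set by the reload handler */
    } Caps;

    static bool
    capsIsValid(Caps *caps)
    {
        if (caps->invalid)
            return false;           /* force a re-probe on the next lookup */
        /* ... other checks: ctime, libvirt version, nested kvm, ... */
        return true;
    }

    static void
    invalidateAll(Caps **entries, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            entries[i]->invalid = true;
    }

    int main(void)
    {
        Caps qemu = { "/usr/bin/qemu-system-x86_64", false };
        Caps *cache[] = { &qemu };

        invalidateAll(cache, 1);                  /* what 'reload' now does */
        printf("valid=%d\n", capsIsValid(&qemu)); /* prints valid=0 */
        return 0;
    }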

Signed-off-by: John Ferlan <***@redhat.com>
---
src/qemu/qemu_capabilities.c | 35 +++++++++++++++++++++++++++++++++++
src/qemu/qemu_capabilities.h |  2 ++
src/qemu/qemu_driver.c       |  4 +++-
3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index ebe0c0c7df..6b66ee006c 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -553,6 +553,7 @@ struct _virQEMUCaps {

bool usedQMP;
bool isNested;
+ bool invalid;

char *binary;
time_t ctime;
@@ -3870,6 +3871,11 @@ virQEMUCapsIsValid(void *data,
if (!qemuCaps->binary)
return true;

+ if (qemuCaps->invalid) {
+ VIR_DEBUG("capabilities for '%s' invalidated", qemuCaps->binary);
+ return false;
+ }
+
if (qemuCaps->libvirtCtime != virGetSelfLastChanged() ||
qemuCaps->libvirtVersion != LIBVIR_VERSION_NUMBER) {
VIR_DEBUG("Outdated capabilities for '%s': libvirt changed "
@@ -4958,6 +4964,35 @@ virQEMUCapsCacheLookupDefault(virFileCachePtr cache,
return ret;
}

+
+/** virQEMUCapsInvalidateCapabilities:
+ *
+ * Force all the data in @cache to be invalidated so that a subsequent
+ * isValid check will force a refetch of the capabilities data.
+ */
+
+static int
+virQEMUCapsInvalidateData(void *payload,
+ const void *name ATTRIBUTE_UNUSED,
+ void *opaque ATTRIBUTE_UNUSED)
+{
+ virQEMUCaps *qemuCaps = payload;
+
+ qemuCaps->invalid = true;
+
+ return 0;
+}
+
+
+void
+virQEMUCapsInvalidateCapabilities(virFileCachePtr cache)
+{
+ virFileCacheForEach(cache, virQEMUCapsInvalidateData, NULL);
+ return;
+}
+
+
bool
virQEMUCapsSupportsVmport(virQEMUCapsPtr qemuCaps,
const virDomainDef *def)
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index 6bb9a2c8f0..0600ecd424 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -598,6 +598,8 @@ virCapsPtr virQEMUCapsInit(virFileCachePtr cache);
int virQEMUCapsGetDefaultVersion(virCapsPtr caps,
virFileCachePtr capsCache,
unsigned int *version);
+void
+virQEMUCapsInvalidateCapabilities(virFileCachePtr cache);

VIR_ENUM_DECL(virQEMUCaps);

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 09e04b8544..3b6b399e5b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -968,7 +968,9 @@ qemuStateReload(void)
if (!qemu_driver)
return 0;

- if (!(caps = virQEMUDriverGetCapabilities(qemu_driver, false)))
+ virQEMUCapsInvalidateCapabilities(qemu_driver->qemuCapsCache);
+
+ if (!(caps = virQEMUDriverGetCapabilities(qemu_driver, true)))
goto cleanup;

cfg = virQEMUDriverGetConfig(qemu_driver);
--
2.17.2
Daniel P. Berrangé
2018-11-14 09:25:09 UTC
Sending as an RFC primarily because I'm looking for whether either
or both mechanisms in the series are more or less desired. Likewise,
if it's felt that the current process of telling customers to just
delete the cache is acceptable, then so be it. If there are other ideas
I'm willing to give them a go too. I did consider adding a virsh
option to "virsh capabilities" (still possible) and/or a virt-admin
option to force the refresh. These just were "easier" and didn't
require an API adjustment to implement.
Patch1 is essentially a means to determine if the kernel config
was changed to allow nested virtualization and to force a refresh
of the capabilities in that case. Without doing so the CPU settings
for a guest may not add the vmx=on depending on configuration and
for the user that doesn't make sense. There is a private bz on this
so I won't bother posting it.
Patch2 and Patch3 make use of the 'service libvirtd reload' function
in order to invalidate all the entries in the internal QEMU capabilities
hash table and then to force a reread. This perhaps has downsides related
to guest usage and previous means to use reload and not refresh if a guest
was running. On the other hand, we tell people to just clear the QEMU
capabilities cache (e.g. rm /var/cache/libvirt/qemu/capabilities/*.xml)
and restart libvirtd, so in essence, the same result. It's not clear
how frequently this is used (it's essentially a SIGHUP to libvirtd).
IMHO the fact that we cache stuff should be completely invisible outside
of libvirt. Sure we've had some bugs in this area, but they are not very
frequent so I'm not enthusiastic to expose any knob to force rebuild beyond
just deleting files.

Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
John Ferlan
2018-11-14 19:22:12 UTC
Post by Daniel P. Berrangé
Sending as an RFC primarily because I'm looking for whether either
or both mechanisms in the series are more or less desired. Likewise,
if it's felt that the current process of telling customers to just
delete the cache is acceptable, then so be it. If there are other ideas
I'm willing to give them a go too. I did consider adding a virsh
option to "virsh capabilities" (still possible) and/or a virt-admin
option to force the refresh. These just were "easier" and didn't
require an API adjustment to implement.
Patch1 is essentially a means to determine if the kernel config
was changed to allow nested virtualization and to force a refresh
of the capabilities in that case. Without doing so the CPU settings
for a guest may not add the vmx=on depending on configuration and
for the user that doesn't make sense. There is a private bz on this
so I won't bother posting it.
Patch2 and Patch3 make use of the 'service libvirtd reload' function
in order to invalidate all the entries in the internal QEMU capabilities
hash table and then to force a reread. This perhaps has downsides related
to guest usage and previous means to use reload and not refresh if a guest
was running. On the other hand, we tell people to just clear the QEMU
capabilities cache (e.g. rm /var/cache/libvirt/qemu/capabilities/*.xml)
and restart libvirtd, so in essence, the same result. It's not clear
how frequently this is used (it's essentially a SIGHUP to libvirtd).
IMHO the fact that we cache stuff should be completely invisible outside
of libvirt. Sure we've had some bugs in this area, but they are not very
frequent so I'm not enthusiastic to expose any knob to force rebuild beyond
just deleting files.
OK - so that more or less obviates patch2 and patch3...

Of course, the fact that we cache stuff hasn't been completely invisible,
and telling someone to fix the problem by "simply" removing the cache
files (and pointing them at the cache location) seems a bit "awkward" -
once you've figured out that the cache is the problem, that is. Many
times it's just not intuitively obvious!

OTOH, baking in the idea that a "reload" rules out caching as the
problem could be useful. I guess reload just felt like a less used
option, assuming of course most would use stop/start or restart instead.

John
Daniel P. Berrangé
2018-11-15 09:50:16 UTC
Post by John Ferlan
Post by Daniel P. Berrangé
Sending as an RFC primarily because I'm looking for whether either
or both mechanisms in the series are more or less desired. Likewise,
if it's felt that the current process of telling customers to just
delete the cache is acceptable, then so be it. If there are other ideas
I'm willing to give them a go too. I did consider adding a virsh
option to "virsh capabilities" (still possible) and/or a virt-admin
option to force the refresh. These just were "easier" and didn't
require an API adjustment to implement.
Patch1 is essentially a means to determine if the kernel config
was changed to allow nested virtualization and to force a refresh
of the capabilities in that case. Without doing so the CPU settings
for a guest may not add the vmx=on depending on configuration and
for the user that doesn't make sense. There is a private bz on this
so I won't bother posting it.
Patch2 and Patch3 make use of the 'service libvirtd reload' function
in order to invalidate all the entries in the internal QEMU capabilities
hash table and then to force a reread. This perhaps has downsides related
to guest usage and previous means to use reload and not refresh if a guest
was running. On the other hand, we tell people to just clear the QEMU
capabilities cache (e.g. rm /var/cache/libvirt/qemu/capabilities/*.xml)
and restart libvirtd, so in essence, the same result. It's not clear
how frequently this is used (it's essentially a SIGHUP to libvirtd).
IMHO the fact that we cache stuff should be completely invisible outside
of libvirt. Sure we've had some bugs in this area, but they are not very
frequent so I'm not enthusiastic to expose any knob to force rebuild beyond
just deleting files.
OK - so that more or less obviates patch2 and patch3...
Of course the fact that we cache stuff hasn't been completely invisible
and telling someone to fix the problem by "simply" removing the cache
files and pointing them to the cache location seems a bit "awkward" once
you figure out that is the problem of course. Many times it's just not
intuitively obvious!
OTOH, baking in the idea that a "reload" could remove the chance that
caching was the problem could be useful. I guess it just felt like it
was a perhaps less used option. Assuming of course most would use
stop/start or restart instead.
Looking at this from a more general POV, cache invalidation bugs are
just one of many different bugs that can & have impacted libvirt over
the years. Adding a force reload API is essentially saying we want to
have

virConnectWhackPossibleBug()

because we think we might have a future bug. I don't think that is a
good design precedent or rationale - essentially it's admitting failure.
If caching really were so terribly implemented that this is considered
needed, then I'd argue caching should be deleted. I don't think we are
in such a bad case though - the kinds of problems have been fairly
niche in impact.

Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
Marc Hartmayer
2018-11-14 10:05:12 UTC
Post by John Ferlan
Support for nested kvm is handled via a kernel module configuration
adjustment which if done after libvirtd is started and/or the last
QEMU capabilities adjustment can result in the inability to start a
guest and use nested kvm until the capabilities cache is invalidated.
Thus, let's fetch and save the setting during initialization and then
when the capabilities are checked for various host related adjustments
that could affect whether the capabilities cache is updated add a check
whether the nested value was set for either kvm_intel or kvm_amd to
force a refetch of the capabilities.
---
src/qemu/qemu_capabilities.c | 43 ++++++++++++++++++++++++++++++++++++
src/qemu/qemu_capspriv.h | 2 ++
tests/qemucapabilitiestest.c | 3 +++
3 files changed, 48 insertions(+)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 2ca5af3297..ebe0c0c7df 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -40,6 +40,7 @@
#include "virnodesuspend.h"
#include "virnuma.h"
#include "virhostcpu.h"
+#include "virkmod.h"
#include "qemu_monitor.h"
#include "virstring.h"
#include "qemu_hostdev.h"
@@ -551,6 +552,7 @@ struct _virQEMUCaps {
virObject parent;
bool usedQMP;
+ bool isNested;
char *binary;
time_t ctime;
@@ -1512,6 +1514,7 @@ virQEMUCapsPtr virQEMUCapsNewCopy(virQEMUCapsPtr qemuCaps)
return NULL;
ret->usedQMP = qemuCaps->usedQMP;
+ ret->isNested = qemuCaps->isNested;
if (VIR_STRDUP(ret->binary, qemuCaps->binary) < 0)
goto error;
@@ -3567,6 +3570,9 @@ virQEMUCapsLoadCache(virArch hostArch,
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_KVM);
virQEMUCapsInitHostCPUModel(qemuCaps, hostArch, VIR_DOMAIN_VIRT_QEMU);
+ qemuCaps->isNested = virXPathBoolean("count(./isNested) > 0",
+ ctxt) > 0;
+
ret = 0;
VIR_FREE(str);
@@ -3786,6 +3792,9 @@ virQEMUCapsFormatCache(virQEMUCapsPtr qemuCaps)
if (qemuCaps->sevCapabilities)
virQEMUCapsFormatSEVInfo(qemuCaps, &buf);
+ if (qemuCaps->isNested)
+ virBufferAddLit(&buf, "<isNested/>\n");
+
virBufferAdjustIndent(&buf, -2);
virBufferAddLit(&buf, "</qemuCaps>\n");
@@ -3826,6 +3835,28 @@ virQEMUCapsSaveFile(void *data,
}
+static bool
+virQEMUCapsIsNested(void)
Not sure if “isNested” is the best wording, since we’re talking about
nested KVM support here.
Post by John Ferlan
+{
+ VIR_AUTOFREE(char *) kConfig = NULL;
+
+ if ((kConfig = virKModConfig()) &&
+ (strstr(kConfig, "kvm_intel nested=1") ||
+ strstr(kConfig, "kvm_amd nested=1")))
Please add a check for "kvm nested=1" here since this is used by s390x :)
Post by John Ferlan
+ return true;
+ return false;
+}
+
+
+void
+virQEMUCapsClearIsNested(virQEMUCapsPtr qemuCaps)
+{
+ /* For qemucapabilitiestest to avoid printing the </isNested> on
+ * hosts with nested set in the kernel */
+ qemuCaps->isNested = false;
+}
+
+
static bool
virQEMUCapsIsValid(void *data,
void *privData)
@@ -3834,6 +3865,7 @@ virQEMUCapsIsValid(void *data,
virQEMUCapsCachePrivPtr priv = privData;
bool kvmUsable;
struct stat sb;
+ bool isNested;
if (!qemuCaps->binary)
return true;
@@ -3866,6 +3898,15 @@ virQEMUCapsIsValid(void *data,
return false;
}
+ /* Check if someone changed the nested={0|1} value for the kernel from
+ * the previous time we checked. If so, then refresh the capabilities. */
+ isNested = virQEMUCapsIsNested();
+ if (isNested != qemuCaps->isNested) {
+ VIR_WARN("changed kernel nested kvm value was %d", qemuCaps->isNested);
+ qemuCaps->isNested = isNested;
+ return false;
+ }
+
if (!virQEMUCapsGuestIsNative(priv->hostArch, qemuCaps->arch)) {
VIR_DEBUG("Guest arch (%s) is not native to host arch (%s), "
"skipping KVM-related checks",
@@ -4452,6 +4493,8 @@ virQEMUCapsInitQMP(virQEMUCapsPtr qemuCaps,
if (virQEMUCapsInitQMPMonitor(qemuCaps, cmd->mon) < 0)
goto cleanup;
+ qemuCaps->isNested = virQEMUCapsIsNested();
+
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_KVM)) {
virQEMUCapsInitQMPCommandAbort(cmd);
if ((rc = virQEMUCapsInitQMPCommandRun(cmd, true)) != 0) {
diff --git a/src/qemu/qemu_capspriv.h b/src/qemu/qemu_capspriv.h
index 8d1a40fe74..b5d6aae2e5 100644
--- a/src/qemu/qemu_capspriv.h
+++ b/src/qemu/qemu_capspriv.h
@@ -48,6 +48,8 @@ int
virQEMUCapsInitQMPMonitor(virQEMUCapsPtr qemuCaps,
qemuMonitorPtr mon);
+void virQEMUCapsClearIsNested(virQEMUCapsPtr qemuCaps);
+
int
virQEMUCapsInitQMPMonitorTCG(virQEMUCapsPtr qemuCaps,
qemuMonitorPtr mon);
diff --git a/tests/qemucapabilitiestest.c b/tests/qemucapabilitiestest.c
index 8fe5a55e1d..703fb6a125 100644
--- a/tests/qemucapabilitiestest.c
+++ b/tests/qemucapabilitiestest.c
@@ -63,6 +63,9 @@ testQemuCaps(const void *opaque)
qemuMonitorTestGetMonitor(mon)) < 0)
goto cleanup;
+ /* Don't apply what the host has... force clear for testing purposes */
+ virQEMUCapsClearIsNested(capsActual);
+
if (virQEMUCapsGet(capsActual, QEMU_CAPS_KVM)) {
qemuMonitorResetCommandID(qemuMonitorTestGetMonitor(mon));
if (virQEMUCapsInitQMPMonitorTCG(capsActual,
--
2.17.2
--
Kind regards / Beste Grüße
Marc Hartmayer

IBM Deutschland Research & Development GmbH
Chairwoman of the Supervisory Board: Martina Koederitz
Management: Dirk Wittkopp
Registered office: Böblingen
Registration court: Amtsgericht Stuttgart, HRB 243294