author     Paolo Bonzini <pbonzini@redhat.com>  2025-03-19 09:46:59 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2025-04-07 07:36:33 -0400
commit     fd02aa45bda6d2f2fedcab70e828867332ef7e1c
tree       23df59a19752449f046d9597d4b6c99b760912cd /virt
parent     7d7685631a0e8cc129c0b2411e95788f2d45226e
parent     7bcf7246c42a81e77fbe18a0a3e7c2813c1690a6
Merge branch 'kvm-tdx-initial' into HEAD
This large commit contains the initial support for TDX in KVM.  All x86
parts enable the host-side hypercalls that KVM uses to talk to the TDX
module, a software component that runs in a special CPU mode called SEAM
(Secure Arbitration Mode).

The series is in turn split into multiple sub-series, each with a separate
merge commit:

- Initialization: basic setup for using the TDX module from KVM, plus
  ioctls to create TDX VMs and vCPUs.

- MMU: in TDX, private and shared halves of the address space are mapped
  by different EPT roots, and the private half is managed by the TDX
  module.  Using the support that was added to the generic MMU code in
  6.14, add support for TDX's secure page tables to the Intel side of KVM.
  Generic KVM code takes care of maintaining a mirror of the secure page
  tables so that they can be queried efficiently, and ensuring that
  changes are applied to both the mirror and the secure EPT.

- vCPU enter/exit: implement the callbacks that handle the entry of a TDX
  vCPU (via the SEAMCALL TDH.VP.ENTER) and the corresponding save/restore
  of host state.

- Userspace exits: introduce support for guest TDVMCALLs that KVM forwards
  to userspace.  These correspond to the usual KVM_EXIT_* "heavyweight
  vmexits" but are triggered through a different mechanism, similar to
  VMGEXIT for SEV-ES and SEV-SNP.

- Interrupt handling: support for virtual interrupt injection as well as
  handling VM-Exits that are caused by vectored events.  Exclusive to TDX
  are machine-check SMIs, which the kernel already knows how to handle
  through the kernel machine check handler (commit 7911f145de5f, "x86/mce:
  Implement recovery for errors in TDX/SEAM non-root mode").

- Loose ends: handling of the remaining exits from the TDX module,
  including EPT violation/misconfig and several TDVMCALL leaves that are
  handled in the kernel (CPUID, HLT, RDMSR/WRMSR, GetTdVmCallInfo); plus
  returning an error or ignoring operations that are not supported by
  TDX guests.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
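As context for the "Userspace exits" item above: the forwarded TDVMCALLs
surface through the same KVM_RUN dispatch that userspace already uses for
ordinary heavyweight vmexits.  The following is a minimal sketch of that
generic dispatch loop, not of the TDX-specific exits added by this series;
it assumes the vCPU file descriptor and its mmap'ed kvm_run area have
already been set up and only names long-standing exit reasons:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: run one vCPU until an exit that the kernel cannot handle
 * internally is forwarded to userspace.  Guest memory/register setup and
 * the TDX-specific exit reasons introduced by this series are omitted. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			perror("KVM_RUN");
			return -1;
		}

		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			/* Port I/O: data lives at run + run->io.data_offset. */
			printf("I/O exit, port 0x%x\n", run->io.port);
			break;
		case KVM_EXIT_MMIO:
			printf("MMIO exit at 0x%llx\n",
			       (unsigned long long)run->mmio.phys_addr);
			break;
		case KVM_EXIT_HLT:
			return 0;	/* guest halted */
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return -1;
		}
	}
}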
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/dirty_ring.c  11
-rw-r--r--  virt/kvm/kvm_main.c    26
2 files changed, 15 insertions(+), 22 deletions(-)
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 7bc74969a819af..d14ffc7513eee9 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -11,14 +11,14 @@
#include <trace/events/kvm.h>
#include "kvm_mm.h"
-int __weak kvm_cpu_dirty_log_size(void)
+int __weak kvm_cpu_dirty_log_size(struct kvm *kvm)
{
return 0;
}
-u32 kvm_dirty_ring_get_rsvd_entries(void)
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
- return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
+ return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size(kvm);
}
bool kvm_use_dirty_bitmap(struct kvm *kvm)
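With the new signature, the per-CPU dirty log size can vary from VM to VM
instead of being a global property.  A hypothetical arch override of the
__weak default above might look like the sketch below; the helper name,
the constant, and the notion of a VM type that cannot use the hardware
dirty log are assumptions for illustration, not the actual x86 code:

/* Hypothetical override of the __weak default, for illustration only. */
int kvm_cpu_dirty_log_size(struct kvm *kvm)
{
	/* A VM type whose private memory is not tracked by the hardware
	 * dirty log could report zero entries here (assumed helper name). */
	if (!arch_vm_has_cpu_dirty_log(kvm))
		return 0;

	return ARCH_CPU_DIRTY_LOG_ENTRIES;	/* assumed per-arch constant */
}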
@@ -74,14 +74,15 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
KVM_MMU_UNLOCK(kvm);
}
-int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size)
{
ring->dirty_gfns = vzalloc(size);
if (!ring->dirty_gfns)
return -ENOMEM;
ring->size = size / sizeof(struct kvm_dirty_gfn);
- ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
+ ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(kvm);
ring->dirty_index = 0;
ring->reset_index = 0;
ring->index = index;
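For reference, a worked example of the sizing above, using illustrative
numbers and assuming a 16-byte struct kvm_dirty_gfn:

/* Illustrative numbers only: a 64 KiB ring of 16-byte entries. */
u32 size       = 64 * 1024;
u32 entries    = size / sizeof(struct kvm_dirty_gfn);	/* 4096 with 16-byte entries */
u32 soft_limit = entries - kvm_dirty_ring_get_rsvd_entries(kvm);
/* Once the producer index passes soft_limit, KVM exits to userspace so the
 * ring can be harvested before the reserved slack is consumed. */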
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e85b33a92624d9..69782df3617fde 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -143,8 +143,6 @@ static int kvm_no_compat_open(struct inode *inode, struct file *file)
#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
.open = kvm_no_compat_open
#endif
-static int kvm_enable_virtualization(void);
-static void kvm_disable_virtualization(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
@@ -4126,7 +4124,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
goto vcpu_free_run_page;
if (kvm->dirty_ring_size) {
- r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
+ r = kvm_dirty_ring_alloc(kvm, &vcpu->dirty_ring,
id, kvm->dirty_ring_size);
if (r)
goto arch_vcpu_destroy;
@@ -4864,7 +4862,7 @@ static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
return -EINVAL;
/* Should be bigger to keep the reserved entries, or a page */
- if (size < kvm_dirty_ring_get_rsvd_entries() *
+ if (size < kvm_dirty_ring_get_rsvd_entries(kvm) *
sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
return -EINVAL;
@@ -5479,8 +5477,9 @@ static struct miscdevice kvm_dev = {
};
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-static bool enable_virt_at_load = true;
+bool enable_virt_at_load = true;
module_param(enable_virt_at_load, bool, 0444);
+EXPORT_SYMBOL_GPL(enable_virt_at_load);
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -5589,7 +5588,7 @@ static struct syscore_ops kvm_syscore_ops = {
.shutdown = kvm_shutdown,
};
-static int kvm_enable_virtualization(void)
+int kvm_enable_virtualization(void)
{
int r;
@@ -5634,8 +5633,9 @@ err_cpuhp:
--kvm_usage_count;
return r;
}
+EXPORT_SYMBOL_GPL(kvm_enable_virtualization);
-static void kvm_disable_virtualization(void)
+void kvm_disable_virtualization(void)
{
guard(mutex)(&kvm_usage_lock);
@@ -5646,6 +5646,7 @@ static void kvm_disable_virtualization(void)
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
kvm_arch_disable_virtualization();
}
+EXPORT_SYMBOL_GPL(kvm_disable_virtualization);
static int kvm_init_virtualization(void)
{
@@ -5661,21 +5662,11 @@ static void kvm_uninit_virtualization(void)
kvm_disable_virtualization();
}
#else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
-static int kvm_enable_virtualization(void)
-{
- return 0;
-}
-
static int kvm_init_virtualization(void)
{
return 0;
}
-static void kvm_disable_virtualization(void)
-{
-
-}
-
static void kvm_uninit_virtualization(void)
{
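The exports above let a vendor module reach the enable/disable paths
directly, for example to keep virtualization on while it performs one-time
setup that must run with VMX active (such as talking to the TDX module).
A minimal sketch with a hypothetical setup function; this is not the
actual TDX bring-up code:

/* Sketch only: bracket one-time setup with the newly exported helpers.
 * example_setup_firmware() is a hypothetical placeholder. */
static int __init example_vendor_init(void)
{
	int r;

	r = kvm_enable_virtualization();
	if (r)
		return r;

	r = example_setup_firmware();

	/* Drop the temporary usage-count reference taken above. */
	kvm_disable_virtualization();
	return r;
}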
@@ -5864,6 +5855,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
+EXPORT_SYMBOL_GPL(kvm_io_bus_read);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
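Finally, exporting kvm_io_bus_read() lets a vendor module complete a guest
read against the in-kernel device buses itself instead of always bouncing
to userspace.  A hedged sketch of such a caller; the surrounding function
is hypothetical:

/* Sketch only: satisfy a guest MMIO read from an in-kernel device.
 * Returns 0 on success, a negative error if no device claims @gpa
 * (the caller could then fall back to a userspace exit). */
static int example_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa,
			     void *data, int len)
{
	return kvm_io_bus_read(vcpu, KVM_MMIO_BUS, gpa, len, data);
}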