author	Fuad Tabba <tabba@google.com>	2022-05-05 07:56:32 +0000
committer	Fuad Tabba <tabba@google.com>	2022-05-16 13:51:52 +0000
commit	b6193c56859c1ba440d1029968eb379a26bf9f30 (patch)
tree	8ca1fc14012ece9ed027a60559292d93e0454be7
parent	729adca51ad544cb856b15791bfba15c57b99135 (diff)
ANDROID: KVM: arm64: pkvm: Ensure that TLBs and I-cache are private to each vcpu
If a different vcpu from the same vm is loaded on the same physical CPU, we must flush the CPU context. This patch ensures that by tracking the vcpu most recently loaded on each physical CPU and flushing the context if that vcpu changes. This could lead to over-invalidation, which could affect performance but not correctness.

Bug: 228810735
Signed-off-by: Fuad Tabba <tabba@google.com>
Change-Id: I70976007165ca3b8d293089dbf9c2111b01ca2f7
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/pkvm.c	36

1 file changed, 36 insertions(+), 0 deletions(-)
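For readability, here is the core pattern the diff below adds, pulled out of the pKVM context. This is a sketch only: struct kvm_vcpu, DEFINE_PER_CPU(), __this_cpu_read() and __this_cpu_write() are the real kernel types and primitives used by the patch, while flush_cpu_context() is a hypothetical stand-in for __kvm_flush_cpu_context(vcpu->arch.hw_mmu). In the patch itself the flush is deferred until after shadow_lock is dropped, which is why the real code uses a separate flush_context flag instead of flushing inline as done here.

/* Sketch: per-CPU tracking of the vcpu last loaded on this physical CPU. */
static DEFINE_PER_CPU(struct kvm_vcpu *, last_loaded_vcpu);

static void vcpu_load_flush_if_needed(struct kvm_vcpu *vcpu)
{
	/*
	 * Conservative check: this also flushes when switching to a vcpu of
	 * a different vm, where it would not strictly be required.
	 */
	if (vcpu != __this_cpu_read(last_loaded_vcpu)) {
		__this_cpu_write(last_loaded_vcpu, vcpu);
		flush_cpu_context(vcpu);	/* hypothetical stand-in */
	}
}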
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index de191428cccf..d97988b941ae 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -226,6 +226,11 @@ static int index_to_shadow_handle(int index)
extern unsigned long hyp_nr_cpus;
/*
+ * Track the vcpu most recently loaded on each physical CPU.
+ */
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_loaded_vcpu);
+
+/*
* Spinlock for protecting the shadow table related state.
* Protects writes to shadow_table, num_shadow_entries, and next_shadow_alloc,
* as well as reads and writes to last_shadow_vcpu_lookup.
@@ -267,6 +272,7 @@ struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx)
{
struct kvm_vcpu *vcpu = NULL;
struct kvm_shadow_vm *vm;
+ bool flush_context = false;
hyp_spin_lock(&shadow_lock);
vm = find_shadow_by_handle(shadow_handle);
@@ -279,12 +285,28 @@ struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx)
vcpu = NULL;
goto unlock;
}
+
+ /*
+ * Guarantee that both TLBs and I-cache are private to each vcpu.
+ * The check below is conservative and could lead to over-invalidation,
+ * because there is no need to nuke the contexts if the vcpu belongs to
+ * a different vm.
+ */
+ if (vcpu != __this_cpu_read(last_loaded_vcpu)) {
+ flush_context = true;
+ __this_cpu_write(last_loaded_vcpu, vcpu);
+ }
+
vcpu->arch.pkvm.loaded_on_cpu = true;
hyp_page_ref_inc(hyp_virt_to_page(vm));
unlock:
hyp_spin_unlock(&shadow_lock);
+ /* No need for the lock while flushing the context. */
+ if (flush_context)
+ __kvm_flush_cpu_context(vcpu->arch.hw_mmu);
+
return vcpu;
}
@@ -695,6 +717,7 @@ int __pkvm_teardown_shadow(int shadow_handle)
u64 pfn;
u64 nr_pages;
void *addr;
+ int i;
/* Lookup then remove entry from the shadow table. */
hyp_spin_lock(&shadow_lock);
@@ -709,6 +732,19 @@ int __pkvm_teardown_shadow(int shadow_handle)
goto err_unlock;
}
+ /*
+ * Clear the tracking for last_loaded_vcpu for all cpus for this vm in
+ * case the same addresses for those vcpus are reused for future vms.
+ */
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ struct kvm_vcpu **last_loaded_vcpu_ptr =
+ per_cpu_ptr(&last_loaded_vcpu, i);
+ struct kvm_vcpu *vcpu = *last_loaded_vcpu_ptr;
+
+ if (vcpu && vcpu->arch.pkvm.shadow_vm == vm)
+ *last_loaded_vcpu_ptr = NULL;
+ }
+
/* Ensure the VMID is clean before it can be reallocated */
__kvm_tlb_flush_vmid(&vm->arch.mmu);
remove_shadow_table(shadow_handle);
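The teardown hunk clears any stale per-CPU pointers so that a future vm reusing the same vcpu allocations cannot be mistaken for the previously loaded vcpu and silently skip the flush. The same pattern in isolation, as a sketch only (last_loaded_vcpu, hyp_nr_cpus, per_cpu_ptr() and the arch.pkvm.shadow_vm field are taken from the patch; the helper name is hypothetical):

static void clear_last_loaded_vcpu(struct kvm_shadow_vm *vm)
{
	int i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_vcpu **slot = per_cpu_ptr(&last_loaded_vcpu, i);

		/* Only clear entries pointing at vcpus of the vm being torn down. */
		if (*slot && (*slot)->arch.pkvm.shadow_vm == vm)
			*slot = NULL;
	}
}

As in the patch, this clearing is done with shadow_lock held, so a concurrent get_shadow_vcpu() cannot race with the teardown and reinstall a pointer to a vcpu of the vm being removed.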