diff options
Diffstat (limited to 'testcases/kernel/kvm/include')
-rw-r--r-- | testcases/kernel/kvm/include/kvm_common.h | 8 | ||||
-rw-r--r-- | testcases/kernel/kvm/include/kvm_guest.h | 14 | ||||
-rw-r--r-- | testcases/kernel/kvm/include/kvm_host.h | 20 | ||||
-rw-r--r-- | testcases/kernel/kvm/include/kvm_x86.h | 72 | ||||
-rw-r--r-- | testcases/kernel/kvm/include/kvm_x86_svm.h | 166 |
5 files changed, 278 insertions, 2 deletions
diff --git a/testcases/kernel/kvm/include/kvm_common.h b/testcases/kernel/kvm/include/kvm_common.h index 4e81d8302..377e3f6aa 100644 --- a/testcases/kernel/kvm/include/kvm_common.h +++ b/testcases/kernel/kvm/include/kvm_common.h @@ -11,6 +11,14 @@ #define KVM_TNONE -1 /* "No result" status value */ /* + * Result value for asynchronous notifications between guest and host. + * Do not use this value directly. Call tst_signal_host() or tst_wait_host() + * in guest code. The notification must be handled by another host thread + * and then the result value must be reset to KVM_TNONE. + */ +#define KVM_TSYNC 0xfe + +/* * Result value indicating end of test. If the test program exits using * the HLT instruction with any valid result value other than KVM_TEXIT or * TBROK, KVM runner will automatically resume VM execution after printing diff --git a/testcases/kernel/kvm/include/kvm_guest.h b/testcases/kernel/kvm/include/kvm_guest.h index ec13c5845..96f246155 100644 --- a/testcases/kernel/kvm/include/kvm_guest.h +++ b/testcases/kernel/kvm/include/kvm_guest.h @@ -64,6 +64,20 @@ void tst_brk_(const char *file, const int lineno, int result, const char *message) __attribute__((noreturn)); #define tst_brk(result, msg) tst_brk_(__FILE__, __LINE__, (result), (msg)) +/* + * Send asynchronous notification to host without stopping VM execution and + * return immediately. The notification must be handled by another host thread. + * The data argument will be passed to host in test_result->file_addr and + * can be used to send additional data both ways. + */ +void tst_signal_host(void *data); + +/* + * Call tst_signal_host(data) and wait for host to call + * tst_kvm_clear_guest_signal(). 
+ */ +void tst_wait_host(void *data); + void *tst_heap_alloc_aligned(size_t size, size_t align); void *tst_heap_alloc(size_t size); diff --git a/testcases/kernel/kvm/include/kvm_host.h b/testcases/kernel/kvm/include/kvm_host.h index 2359944fd..06bcb5d45 100644 --- a/testcases/kernel/kvm/include/kvm_host.h +++ b/testcases/kernel/kvm/include/kvm_host.h @@ -125,13 +125,29 @@ struct kvm_cpuid2 *tst_kvm_get_cpuid(int sysfd); void tst_kvm_create_instance(struct tst_kvm_instance *inst, size_t ram_size); /* - * Execute the given KVM instance and print results. + * Execute the given KVM instance and print results. If ioctl(KVM_RUN) is + * expected to fail, pass the expected error code in exp_errno, otherwise + * set it to zero. Returns last value returned by ioctl(KVM_RUN). */ -void tst_kvm_run_instance(struct tst_kvm_instance *inst); +int tst_kvm_run_instance(struct tst_kvm_instance *inst, int exp_errno); /* * Close the given KVM instance. */ void tst_kvm_destroy_instance(struct tst_kvm_instance *inst); +/* + * Wait for given VM to call tst_signal_host() or tst_wait_host(). Timeout + * value is in milliseconds. Zero means no wait, negative value means wait + * forever. Returns 0 if signal was received, KVM_TEXIT if the VM exited + * without sending a signal, or -1 if timeout was reached. + */ +int tst_kvm_wait_guest(struct tst_kvm_instance *inst, int timeout_ms); + +/* + * Clear VM signal sent by tst_signal_host(). If the VM is waiting + * in tst_wait_host(), this function will signal the VM to resume execution. 
+ */ +void tst_kvm_clear_guest_signal(struct tst_kvm_instance *inst); + #endif /* KVM_HOST_H_ */ diff --git a/testcases/kernel/kvm/include/kvm_x86.h b/testcases/kernel/kvm/include/kvm_x86.h index 4f3671135..bc36c0e0f 100644 --- a/testcases/kernel/kvm/include/kvm_x86.h +++ b/testcases/kernel/kvm/include/kvm_x86.h @@ -10,6 +10,9 @@ #include "kvm_test.h" +#define PAGESIZE 0x1000 +#define KVM_GDT_SIZE 32 + /* Interrupts */ #define X86_INTR_COUNT 256 @@ -38,19 +41,48 @@ #define INTR_SECURITY_ERROR 30 +/* Segment descriptor flags */ +#define SEGTYPE_LDT 0x02 +#define SEGTYPE_TSS 0x09 +#define SEGTYPE_TSS_BUSY 0x0b +#define SEGTYPE_CALL_GATE 0x0c +#define SEGTYPE_INTR_GATE 0x0e +#define SEGTYPE_TRAP_GATE 0x0f +#define SEGTYPE_RODATA 0x10 +#define SEGTYPE_RWDATA 0x12 +#define SEGTYPE_STACK 0x16 +#define SEGTYPE_CODE 0x1a +#define SEGTYPE_MASK 0x1f + +#define SEGFLAG_NSYSTEM 0x10 +#define SEGFLAG_PRESENT 0x80 +#define SEGFLAG_CODE64 0x200 +#define SEGFLAG_32BIT 0x400 +#define SEGFLAG_PAGE_LIMIT 0x800 + + /* CPUID constants */ #define CPUID_GET_INPUT_RANGE 0x80000000 #define CPUID_GET_EXT_FEATURES 0x80000001 +#define CPUID_GET_SVM_FEATURES 0x8000000a /* Model-specific CPU register constants */ #define MSR_EFER 0xc0000080 +#define MSR_VM_CR 0xc0010114 +#define MSR_VM_HSAVE_PA 0xc0010117 #define EFER_SCE (1 << 0) /* SYSCALL/SYSRET instructions enabled */ #define EFER_LME (1 << 8) /* CPU is running in 64bit mode */ #define EFER_LMA (1 << 10) /* CPU uses 64bit memory paging (read-only) */ #define EFER_NXE (1 << 11) /* Execute disable bit active */ +#define EFER_SVME (1 << 12) /* AMD SVM instructions enabled */ +#define VM_CR_DPD (1 << 0) +#define VM_CR_R_INIT (1 << 1) +#define VM_CR_DIS_A20M (1 << 2) +#define VM_CR_LOCK (1 << 3) +#define VM_CR_SVMDIS (1 << 4) /* Control register constants */ #define CR4_VME (1 << 0) @@ -91,6 +123,25 @@ struct intr_descriptor { #endif /* defined(__x86_64__) */ } __attribute__((__packed__)); +struct segment_descriptor { + unsigned int limit_lo : 16; + unsigned int baseaddr_lo : 24; + unsigned int flags_lo : 8; + unsigned int limit_hi : 4; + unsigned int flags_hi : 4; + unsigned int baseaddr_hi : 8; +} __attribute__((__packed__)); + +struct segment_descriptor64 { + unsigned int limit_lo : 16; + unsigned int baseaddr_lo : 24; + unsigned int flags_lo : 8; + unsigned int limit_hi : 4; + unsigned int flags_hi : 4; + uint64_t baseaddr_hi : 40; + uint32_t reserved; +} __attribute__((__packed__)); + struct page_table_entry_pae { unsigned int present: 1; unsigned int writable: 1; @@ -116,15 +167,36 @@ struct kvm_cregs { unsigned long cr0, cr2, cr3, cr4; }; +struct kvm_sregs { + uint16_t cs, ds, es, fs, gs, ss; +}; + +struct kvm_regs64 { + uint64_t rax, rbx, rcx, rdx, rdi, rsi, rbp, rsp; + uint64_t r8, r9, r10, r11, r12, r13, r14, r15; +}; + extern struct page_table_entry_pae kvm_pagetable[]; extern struct intr_descriptor kvm_idt[X86_INTR_COUNT]; +extern struct segment_descriptor kvm_gdt[KVM_GDT_SIZE]; /* Page table helper functions */ uintptr_t kvm_get_page_address_pae(const struct page_table_entry_pae *entry); +/* Segment descriptor table functions */ +void kvm_set_segment_descriptor(struct segment_descriptor *dst, + uint64_t baseaddr, uint32_t limit, unsigned int flags); +void kvm_parse_segment_descriptor(struct segment_descriptor *src, + uint64_t *baseaddr, uint32_t *limit, unsigned int *flags); +int kvm_find_free_descriptor(const struct segment_descriptor *table, + size_t size); +unsigned int kvm_create_stack_descriptor(struct segment_descriptor *table, + size_t tabsize, void *stack_base); + /* Functions for querying CPU info and status */ void kvm_get_cpuid(unsigned int eax, unsigned int ecx, struct kvm_cpuid *buf); void kvm_read_cregs(struct kvm_cregs *buf); +void kvm_read_sregs(struct kvm_sregs *buf); uint64_t kvm_rdmsr(unsigned int msr); void kvm_wrmsr(unsigned int msr, uint64_t value); diff --git a/testcases/kernel/kvm/include/kvm_x86_svm.h b/testcases/kernel/kvm/include/kvm_x86_svm.h new file mode 100644 
index 000000000..b4b1b80e2 --- /dev/null +++ b/testcases/kernel/kvm/include/kvm_x86_svm.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2023 SUSE LLC <mdoucha@suse.cz> + * + * x86-specific KVM helper functions and structures for AMD SVM + */ + +#ifndef KVM_X86_SVM_H_ +#define KVM_X86_SVM_H_ + +#include "kvm_x86.h" + +/* CPUID_GET_SVM_FEATURES flags returned in EDX */ +#define SVM_CPUID_NESTED_PAGING (1 << 0) +#define SVM_CPUID_LBR_VIRT (1 << 1) +#define SVM_CPUID_LOCK (1 << 2) +#define SVM_CPUID_NRIP_SAVE (1 << 3) +#define SVM_CPUID_TSC_RATE_MSR (1 << 4) +#define SVM_CPUID_VMCB_CLEAN (1 << 5) +#define SVM_CPUID_FLUSH_ASID (1 << 6) +#define SVM_CPUID_DECODE_ASSIST (1 << 7) +#define SVM_CPUID_PAUSE_FILTER (1 << 10) +#define SVM_CPUID_PAUSE_THRESHOLD (1 << 12) +#define SVM_CPUID_AVIC (1 << 13) +#define SVM_CPUID_VMSAVE_VIRT (1 << 15) +#define SVM_CPUID_VGIF (1 << 16) +#define SVM_CPUID_GMET (1 << 17) +#define SVM_CPUID_X2AVIC (1 << 18) +#define SVM_CPUID_SSSCHECK (1 << 19) +#define SVM_CPUID_SPEC_CTRL (1 << 20) +#define SVM_CPUID_ROGPT (1 << 21) +#define SVM_CPUID_HOST_MCE_OVERRIDE (1 << 23) +#define SVM_CPUID_TLBI_CTL (1 << 24) +#define SVM_CPUID_NMI_VIRT (1 << 25) +#define SVM_CPUID_IBS_VIRT (1 << 26) + +/* SVM event intercept IDs */ +#define SVM_INTERCEPT_HLT 0x78 +#define SVM_INTERCEPT_VMRUN 0x80 +#define SVM_INTERCEPT_VMLOAD 0x82 +#define SVM_INTERCEPT_VMSAVE 0x83 +#define SVM_INTERCEPT_STGI 0x84 +#define SVM_INTERCEPT_CLGI 0x85 +#define SVM_INTERCEPT_MAX 0x95 + +/* SVM vmrun exit codes */ +#define SVM_EXIT_HLT 0x78 +#define SVM_EXIT_VMRUN 0x80 +#define SVM_EXIT_VMLOAD 0x82 +#define SVM_EXIT_VMSAVE 0x83 +#define SVM_EXIT_STGI 0x84 +#define SVM_EXIT_CLGI 0x85 +#define SVM_EXIT_AVIC_NOACCEL 0x402 +#define SVM_EXIT_INVALID ((uint64_t)-1) + +/* SVM VMCB flags */ +#define SVM_INTR_AVIC (1 << 7) + +struct kvm_vmcb_descriptor { + uint16_t selector; + uint16_t attrib; + uint32_t limit; + uint64_t base; +}; + +struct kvm_vmcb { + 
/* VMCB control area */ + uint8_t intercepts[20]; + uint8_t reserved1[44]; + uint64_t iopm_base_addr; + uint64_t msrpm_base_addr; + uint64_t tsc_offset; + uint32_t guest_asid; + uint32_t tlb_control; + uint8_t virtual_tpr; + uint8_t virtual_irq; + unsigned char virt_intr_prio: 4; + unsigned char virt_ignore_tpr: 4; + uint8_t virt_intr_ctl; + uint8_t virt_intr_vector; + uint8_t reserved2[3]; + uint64_t interrupt_shadow; + uint64_t exitcode; + uint64_t exitinfo1; + uint64_t exitinfo2; + uint64_t exit_int_info; + uint64_t enable_nested_paging; + uint64_t avic_bar; + uint64_t ghcb_gpa; + uint64_t event_injection; + uint64_t nested_cr3; + uint64_t virt_ext; + uint32_t vmcb_clean; + uint8_t reserved3[4]; + uint64_t next_rip; + uint8_t instr_len; + uint8_t instr_bytes[15]; + uint64_t avic_backing_page; + uint8_t reserved4[8]; + uint64_t avic_logical_ptr; + uint64_t avic_physical_ptr; + uint8_t reserved5[8]; + uint64_t vmsa_pa; + uint64_t vmgexit_rax; + uint8_t vmgexit_cpl; + uint8_t reserved6[0x2e7]; + + /* VMCB state save area */ + struct kvm_vmcb_descriptor es, cs, ss, ds, fs, gs; + struct kvm_vmcb_descriptor gdtr, ldtr, idtr, tr; + uint8_t reserved7[43]; + uint8_t cpl; + uint8_t reserved8[4]; + uint64_t efer; + uint8_t reserved9[112]; + uint64_t cr4; + uint64_t cr3; + uint64_t cr0; + uint64_t dr7; + uint64_t dr6; + uint64_t rflags; + uint64_t rip; + uint8_t reserved10[88]; + uint64_t rsp; + uint64_t s_cet; + uint64_t ssp; + uint64_t isst_addr; + uint64_t rax; + uint64_t star; + uint64_t lstar; + uint64_t cstar; + uint64_t sfmask; + uint64_t kernel_gs_base; + uint64_t sysenter_cs; + uint64_t sysenter_esp; + uint64_t sysenter_eip; + uint64_t cr2; + uint8_t reserved11[32]; + uint64_t guest_pat; + uint8_t padding[0x990]; +}; + +struct kvm_svm_vcpu { + struct kvm_vmcb *vmcb; + struct kvm_regs64 regs; +}; + +/* AMD SVM virtualization helper functions */ +int kvm_is_svm_supported(void); +int kvm_get_svm_state(void); +void kvm_set_svm_state(int enabled); + +void kvm_init_svm(void); /* Fully initialize host SVM environment */ +struct kvm_vmcb *kvm_alloc_vmcb(void); +void kvm_vmcb_copy_gdt_descriptor(struct kvm_vmcb_descriptor *dst, + unsigned int gdt_id); +void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id, + unsigned int state); +void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss, + void *rsp, int (*guest_main)(void)); +struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void), + int alloc_stack); + +void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu); + +#endif /* KVM_X86_SVM_H_ */ |