Diffstat (limited to 'drivers/hv/hv.c')
-rw-r--r--  drivers/hv/hv.c  | 131
1 file changed, 117 insertions(+), 14 deletions(-)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index de6708dbe0df48..51e5018ac9b26c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
+#include <linux/set_memory.h>
#include "hyperv_vmbus.h"
/* The one and only */
@@ -56,20 +57,37 @@ int hv_post_message(union hv_connection_id connection_id,
local_irq_save(flags);
- aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /*
+ * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+ * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+ * can use the encrypted hyperv_pcpu_input_arg because it copies the
+ * input into the GHCB page, which has been decrypted by the paravisor.
+ */
+ if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+ aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+ else
+ aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- if (hv_isolation_type_snp())
- status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
- (void *)aligned_msg, NULL,
- sizeof(*aligned_msg));
- else
+ if (ms_hyperv.paravisor_present) {
+ if (hv_isolation_type_tdx())
+ status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+ virt_to_phys(aligned_msg), 0);
+ else if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = HV_STATUS_INVALID_PARAMETER;
+ } else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL);
+ }
local_irq_restore(flags);
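For reference, the decrypted page used above is carried in the per-CPU context that the rest of this file iterates over. A minimal sketch of the relevant members, assuming the authoritative definition of struct hv_per_cpu_context lives in hyperv_vmbus.h as the comment above suggests (other members omitted; layout illustrative, only the field names that appear in this diff are taken as given):

struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;

	/*
	 * Used only by hv_post_message() on a TDX VM with the paravisor:
	 * hyperv_pcpu_input_arg is encrypted there, but HVCALL_POST_MESSAGE
	 * needs a decrypted input page, so hv_synic_alloc() sets this page
	 * up and flips it with set_memory_decrypted().
	 */
	void *post_msg_page;

	struct tasklet_struct msg_dpc;	/* initialized by tasklet_init() below */
};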
@@ -78,7 +96,7 @@ int hv_post_message(union hv_connection_id connection_id,
int hv_synic_alloc(void)
{
- int cpu;
+ int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
/*
@@ -104,11 +122,29 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->post_msg_page == NULL) {
+ pr_err("Unable to allocate post msg page\n");
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt post msg page: %d\n", ret);
+ /* Just leak the page, as it's unsafe to free the page. */
+ hv_cpu->post_msg_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+ }
+
/*
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!hv_isolation_type_snp() && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -120,29 +156,96 @@ int hv_synic_alloc(void)
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
+
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ hv_cpu->synic_message_page = NULL;
goto err;
}
}
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+
+ /*
+ * Free the event page here so that hv_synic_free()
+ * won't later try to re-encrypt it.
+ */
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+ memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+ }
}
return 0;
+
err:
/*
* Any memory allocations that succeeded will be freed when
* the caller cleans up by calling hv_synic_free()
*/
- return -ENOMEM;
+ return ret;
}
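The allocation paths above all follow one pattern: allocate a zeroed page, mark it decrypted so the host can read it, then zero it again because the encryption transition can leave stale contents, and on a set_memory_decrypted() failure leak the page rather than free it, since a page whose encryption state is indeterminate must never go back to the page allocator. A hypothetical helper distilling that invariant (alloc_decrypted_page() is not a kernel API; this is only a sketch):

#include <linux/gfp.h>
#include <linux/set_memory.h>
#include <linux/string.h>

/*
 * Hypothetical helper: allocate a page and share it with the host.
 * On set_memory_decrypted() failure the page is deliberately leaked,
 * because freeing a page in an indeterminate encryption state is unsafe.
 */
static void *alloc_decrypted_page(void)
{
	void *page = (void *)get_zeroed_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	if (set_memory_decrypted((unsigned long)page, 1)) {
		/* Leak: do NOT call free_page() here. */
		return NULL;
	}

	/* The transition may leave stale contents; zero the page again. */
	memset(page, 0, PAGE_SIZE);
	return page;
}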
void hv_synic_free(void)
{
- int cpu;
+ int cpu, ret;
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
+ /* It's better to leak the page if the encryption fails. */
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ if (hv_cpu->post_msg_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt post msg page: %d\n", ret);
+ hv_cpu->post_msg_page = NULL;
+ }
+ }
+ }
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ if (hv_cpu->synic_message_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+ }
+ }
+
+ if (hv_cpu->synic_event_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ }
+ }
+ }
+
+ free_page((unsigned long)hv_cpu->post_msg_page);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
}
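Freeing mirrors the allocation invariant: a shared page must be re-encrypted before free_page(), and on failure the pointer is cleared so the unconditional free_page() calls above see 0 (a no-op) and the page is leaked on purpose. The inverse of the hypothetical helper sketched after hv_synic_alloc(), under the same assumptions:

/*
 * Hypothetical inverse of alloc_decrypted_page(): make the page private
 * again so it can be freed.  Returns the pointer to free, or NULL if the
 * page must be leaked because re-encryption failed.
 */
static void *reencrypt_for_free(void *page)
{
	if (!page)
		return NULL;

	if (set_memory_encrypted((unsigned long)page, 1))
		return NULL;	/* leak rather than free a still-shared page */

	return page;
}

/* Example use; free_page(0) is a no-op, matching the code above: */
/*	free_page((unsigned long)reencrypt_for_free(hv_cpu->post_msg_page)); */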
@@ -170,7 +273,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -189,7 +292,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
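In both hunks, ms_hyperv.shared_gpa_boundary is the vTOM (virtual top of memory) boundary: the hypervisor reports the SIMP/SIEFP base in the decrypted alias above that boundary, and clearing the boundary bit recovers the address that ioremap_cache(), which maps decrypted here, can map. A worked example of the arithmetic, assuming a vTOM at bit 46 purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define HV_HYP_PAGE_SHIFT 12		/* Hyper-V hypervisor pages are 4 KiB */

int main(void)
{
	/* Assumed values, purely for illustration. */
	uint64_t shared_gpa_boundary = 1ULL << 46;	/* hypothetical vTOM bit */
	uint64_t base_simp_gpa = (1ULL << 34) + 1;	/* PFN with the vTOM bit set */

	uint64_t gpa  = base_simp_gpa << HV_HYP_PAGE_SHIFT;	/* 0x400000001000 */
	uint64_t base = gpa & ~shared_gpa_boundary;		/* 0x000000001000 */

	printf("aliased GPA %#llx -> mappable base %#llx\n",
	       (unsigned long long)gpa, (unsigned long long)base);
	return 0;
}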
@@ -272,7 +375,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -284,7 +387,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
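Taken together, the two hunks above encode an ownership rule that makes hv_synic_disable_regs() deliberately asymmetric with hv_synic_free(). A comment-level sketch of that pairing (illustrative, not verbatim kernel code):

/*
 * Paravisor present, or root partition: the SynIC pages belong to the
 * paravisor/hypervisor; the guest merely maps them.
 *     setup:    synic_message_page = ioremap_cache(base, size);
 *     teardown: iounmap(synic_message_page);
 *
 * Fully enlightened guest: the pages are ordinary guest memory.
 *     setup:    get_zeroed_page() in hv_synic_alloc()
 *               (+ set_memory_decrypted() on SNP/TDX without a paravisor)
 *     teardown: set_memory_encrypted() + free_page() in hv_synic_free()
 */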