!262 Support Hygon Trusted Key Management running on CSV
Merge pull request !262 from xisme/6.6_dev_tkm_support_csv
This commit is contained in: commit 9e657f9a9a
@@ -5249,13 +5249,22 @@ static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64
    struct kvm_vpsp vpsp = {
        .kvm = kvm,
        .write_guest = kvm_write_guest,
        .read_guest = kvm_read_guest
        .read_guest = kvm_read_guest,
        .gfn_to_pfn = gfn_to_pfn,
    };
    switch (nr) {
    case KVM_HC_PSP_OP:
        ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3);
        break;

    if (sev_guest(kvm)) {
        vpsp.vm_handle = to_kvm_svm(kvm)->sev_info.handle;
        vpsp.is_csv_guest = 1;
    }

    switch (nr) {
    case KVM_HC_PSP_COPY_FORWARD_OP:
        ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2);
        break;
    case KVM_HC_PSP_FORWARD_OP:
        ret = kvm_pv_psp_forward_op(&vpsp, a0, a1, a2);
        break;
    default:
        ret = -KVM_ENOSYS;
        break;
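Note: as a rough guest-side illustration (not part of this patch), these hypercalls would be reached with a plain vmmcall. The sketch below assumes the standard KVM x86 calling convention (nr in RAX, arguments in RBX/RCX/RDX, result in RAX); the helper name is hypothetical.

static inline long hygon_psp_hypercall(unsigned long nr, unsigned long cmd,
                                       unsigned long data_gpa, unsigned long ret_info)
{
    long ret;

    /* nr is KVM_HC_PSP_COPY_FORWARD_OP or KVM_HC_PSP_FORWARD_OP */
    asm volatile("vmmcall"
                 : "=a"(ret)
                 : "a"(nr), "b"(cmd), "c"(data_gpa), "d"(ret_info)
                 : "memory");
    return ret;
}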
@@ -9879,7 +9879,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
    }

    if (static_call(kvm_x86_get_cpl)(vcpu) != 0 &&
        !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) {
        !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION
        || nr == KVM_HC_PSP_OP_OBSOLETE
        || nr == KVM_HC_PSP_COPY_FORWARD_OP
        || nr == KVM_HC_PSP_FORWARD_OP))) {
        ret = -KVM_EPERM;
        goto out;
    }
@@ -9916,7 +9919,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        kvm_sched_yield(vcpu, a0);
        ret = 0;
        break;
    case KVM_HC_PSP_OP:
    case KVM_HC_PSP_OP_OBSOLETE:
    case KVM_HC_PSP_COPY_FORWARD_OP:
    case KVM_HC_PSP_FORWARD_OP:
        ret = -KVM_ENOSYS;
        if (kvm_arch_hypercall)
            ret = kvm_arch_hypercall(vcpu->kvm, nr, a0, a1, a2, a3);
@@ -14,6 +14,7 @@
#include <linux/psp.h>
#include <linux/psp-hygon.h>
#include <uapi/linux/psp-hygon.h>
#include <linux/bitfield.h>

#include <asm/csv.h>
@@ -760,12 +761,12 @@ static int vpsp_dequeue_cmd(int prio, int index,
 * Populate the command from the virtual machine to the queue to
 * support execution in ringbuffer mode
 */
static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags)
static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags)
{
    struct csv_cmdptr_entry cmdptr = { };
    int index = -1;

    cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid);
    cmdptr.cmd_buf_ptr = phy_addr;
    cmdptr.cmd_id = cmd;
    cmdptr.cmd_flags = flags;
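Note: with this signature change the queue no longer derives the buffer address itself; callers hand in a finished physical address, already VID-tagged when one applies. A minimal sketch of the new calling pattern (buf, vid, cmd and psp_ret stand for hypothetical caller state):

static int enqueue_demo(void *buf, uint32_t vid, int cmd, struct vpsp_ret *psp_ret)
{
    /* tag the VID into bits 63:56 of the buffer's physical address */
    phys_addr_t phy_addr = PUT_PSP_VID(__psp_pa(buf), vid);
    int index = vpsp_fill_cmd_queue(CSV_COMMAND_PRIORITY_LOW, cmd, phy_addr, 0);

    if (index < 0) /* queue full: fall back to the mailbox path */
        return vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret);
    return index;
}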
@@ -1065,12 +1066,91 @@ end:
    return rb_supported;
}

int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret);
static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret)
{
    struct psp_device *psp = psp_master;
    struct sev_device *sev;
    unsigned int phys_lsb, phys_msb;
    unsigned int reg, ret = 0;

    if (!psp || !psp->sev_data)
        return -ENODEV;

    if (*hygon_psp_hooks.psp_dead)
        return -EBUSY;

    sev = psp->sev_data;

    /* Get the physical address of the command buffer */
    phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0;
    phys_msb = phy_addr ? upper_32_bits(phy_addr) : 0;

    dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
            cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout);

    iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
    iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

    sev->int_rcvd = 0;

    reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
    iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

    /* wait for command completion */
    ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout);
    if (ret) {
        if (psp_ret)
            *psp_ret = 0;

        dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
        *hygon_psp_hooks.psp_dead = true;

        return ret;
    }

    *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout;

    if (psp_ret)
        *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);

    if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
        dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
                cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
        ret = -EIO;
    }

    return ret;
}

int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret)
{
    int rc;
    int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);

    if (is_vendor_hygon() && mutex_enabled) {
        if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
                                   PSP_MUTEX_TIMEOUT) != 1) {
            return -EBUSY;
        }
    } else {
        mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
    }

    rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret);

    if (is_vendor_hygon() && mutex_enabled)
        psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
    else
        mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);

    return rc;
}

/*
 * Try to obtain the result again by the command index, this
 * interface is used in ringbuffer mode
 */
int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data,
int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr,
                        struct vpsp_ret *psp_ret)
{
    int ret = 0;
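Note: the mailbox write packs the command id and the interrupt-on-completion flag into one 32-bit register with the bitfield.h helpers, and the completion path unpacks the status the same way. A self-contained illustration; the DEMO_* masks are stand-ins, the real layouts come from the SEV and PSP driver headers.

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_CMDRESP_STS GENMASK(15, 0)  /* stand-in for PSP_CMDRESP_STS */
#define DEMO_CMDRESP_CMD GENMASK(26, 16) /* stand-in for SEV_CMDRESP_CMD */
#define DEMO_CMDRESP_IOC BIT(0)          /* stand-in for SEV_CMDRESP_IOC */

static u32 demo_pack(u32 cmd)
{
    /* place the command id in bits 26:16 and request a completion irq */
    return FIELD_PREP(DEMO_CMDRESP_CMD, cmd) | DEMO_CMDRESP_IOC;
}

static u32 demo_status(u32 reg)
{
    /* read the device status back out of bits 15:0 of the response */
    return FIELD_GET(DEMO_CMDRESP_STS, reg);
}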
@@ -1093,8 +1173,7 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data,
    /* dequeue command from queue*/
    vpsp_dequeue_cmd(prio, index, &cmd);

    ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data,
                               (int *)psp_ret);
    ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret);
    psp_ret->status = VPSP_FINISH;
    vpsp_psp_mutex_unlock();
    if (unlikely(ret)) {
@@ -1137,7 +1216,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result);
 * vpsp_try_get_result interface will be used to obtain the result
 * later again
 */
int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret)
{
    int ret = 0;
    int rb_supported;
@@ -1152,10 +1231,10 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
                                       (struct vpsp_cmd *)&cmd);
    if (rb_supported) {
        /* fill command in ringbuffer's queue and get index */
        index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0);
        index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0);
        if (unlikely(index < 0)) {
            /* do mailbox command if queuing failed*/
            ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret);
            ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret);
            if (unlikely(ret)) {
                if (ret == -EIO) {
                    ret = 0;
@@ -1171,14 +1250,14 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
        }

        /* try to get result from the ringbuffer command */
        ret = vpsp_try_get_result(vid, prio, index, data, psp_ret);
        ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret);
        if (unlikely(ret)) {
            pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret);
            goto end;
        }
    } else {
        /* mailbox mode */
        ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret);
        ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret);
        if (unlikely(ret)) {
            if (ret == -EIO) {
                ret = 0;
@@ -30,6 +30,8 @@ enum HYGON_PSP_OPCODE {
    HYGON_PSP_MUTEX_ENABLE = 1,
    HYGON_PSP_MUTEX_DISABLE,
    HYGON_VPSP_CTRL_OPT,
    HYGON_PSP_OP_PIN_USER_PAGE,
    HYGON_PSP_OP_UNPIN_USER_PAGE,
    HYGON_PSP_OPCODE_MAX_NR,
};
@@ -38,16 +40,26 @@ enum VPSP_DEV_CTRL_OPCODE {
    VPSP_OP_VID_DEL,
    VPSP_OP_SET_DEFAULT_VID_PERMISSION,
    VPSP_OP_GET_DEFAULT_VID_PERMISSION,
    VPSP_OP_SET_GPA,
};

struct vpsp_dev_ctrl {
    unsigned char op;
    /**
     * To be compatible with old user mode,
     * struct vpsp_dev_ctrl must be kept at 132 bytes.
     */
    unsigned char resv[3];
    union {
        unsigned int vid;
        // Set or check the permissions for the default VID
        unsigned int def_vid_perm;
        struct {
            u64 gpa_start;
            u64 gpa_end;
        } gpa;
        unsigned char reserved[128];
    } data;
    } __packed data;
};

uint64_t atomic64_exchange(volatile uint64_t *dst, uint64_t val)
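Note: for orientation, a userspace sketch of driving the new VPSP_OP_SET_GPA opcode. This assumes a uapi header that exports struct vpsp_dev_ctrl and the opcode enum; the device path and the HYGON_PSP_IOC_VPSP request macro are hypothetical stand-ins, not defined in this patch.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int set_tkm_gpa_window(uint64_t start, uint64_t end)
{
    struct vpsp_dev_ctrl ctrl;
    int fd = open("/dev/hygon_psp_config", O_RDWR); /* illustrative path */
    int ret;

    if (fd < 0)
        return -1;
    memset(&ctrl, 0, sizeof(ctrl));
    ctrl.op = VPSP_OP_SET_GPA;       /* opcode added by this patch */
    ctrl.data.gpa.gpa_start = start; /* window the CSV guest may hand to the PSP */
    ctrl.data.gpa.gpa_end = end;
    ret = ioctl(fd, HYGON_PSP_IOC_VPSP, &ctrl); /* hypothetical request macro */
    close(fd);
    return ret;
}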
@@ -169,19 +181,15 @@ DEFINE_RWLOCK(vpsp_rwlock);
#define VPSP_VID_MAX_ENTRIES 2048
#define VPSP_VID_NUM_MAX 64

struct vpsp_vid_entry {
    uint32_t vid;
    pid_t pid;
};
static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES];
static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES];
static uint32_t g_vpsp_vid_num;
static int compare_vid_entries(const void *a, const void *b)
{
    return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid;
    return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid;
}
static void swap_vid_entries(void *a, void *b, int size)
{
    struct vpsp_vid_entry entry;
    struct vpsp_context entry;

    memcpy(&entry, a, size);
    memcpy(a, b, size);
@@ -206,43 +214,41 @@ int vpsp_get_default_vid_permission(void)
EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission);

/**
 * When the virtual machine executes the 'tkm' command,
 * it needs to retrieve the corresponding 'vid'
 * by performing a binary search using 'kvm->userspace_pid'.
 * get a vpsp context from pid
 */
int vpsp_get_vid(uint32_t *vid, pid_t pid)
int vpsp_get_context(struct vpsp_context **ctx, pid_t pid)
{
    struct vpsp_vid_entry new_entry = {.pid = pid};
    struct vpsp_vid_entry *existing_entry = NULL;
    struct vpsp_context new_entry = {.pid = pid};
    struct vpsp_context *existing_entry = NULL;

    read_lock(&vpsp_rwlock);
    existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num,
                             sizeof(struct vpsp_vid_entry), compare_vid_entries);
    existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num,
                             sizeof(struct vpsp_context), compare_vid_entries);
    read_unlock(&vpsp_rwlock);

    if (!existing_entry)
        return -ENOENT;
    if (vid) {
        *vid = existing_entry->vid;
        pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid);
    }

    if (ctx)
        *ctx = existing_entry;

    return 0;
}
EXPORT_SYMBOL_GPL(vpsp_get_vid);
EXPORT_SYMBOL_GPL(vpsp_get_context);

/**
 * Upon qemu startup, this section checks whether
 * the '-device psp,vid' parameter is specified.
 * If set, it utilizes the 'vpsp_add_vid' function
 * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'.
 * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'.
 * The insertion is done in ascending order of 'pid'.
 */
static int vpsp_add_vid(uint32_t vid)
{
    pid_t cur_pid = task_pid_nr(current);
    struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid};
    struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid};

    if (vpsp_get_vid(NULL, cur_pid) == 0)
    if (vpsp_get_context(NULL, cur_pid) == 0)
        return -EEXIST;
    if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES)
        return -ENOMEM;
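Note: the lookup above relies on g_vpsp_context_array staying sorted by pid, which is what the sort() after every insertion guarantees. The same pattern in miniature, as standalone C:

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned vid; int pid; };

static int cmp_pid(const void *a, const void *b)
{
    return ((const struct entry *)a)->pid - ((const struct entry *)b)->pid;
}

int main(void)
{
    struct entry tbl[] = { {1, 300}, {2, 100}, {3, 200} };
    struct entry key = { .pid = 200 };
    struct entry *hit;

    qsort(tbl, 3, sizeof(tbl[0]), cmp_pid);              /* keep ascending by pid */
    hit = bsearch(&key, tbl, 3, sizeof(tbl[0]), cmp_pid);
    printf("vid=%u\n", hit ? hit->vid : 0);              /* prints: vid=3 */
    return 0;
}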
@@ -250,8 +256,8 @@ static int vpsp_add_vid(uint32_t vid)
        return -EINVAL;

    write_lock(&vpsp_rwlock);
    memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry));
    sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry),
    memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context));
    sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context),
         compare_vid_entries, swap_vid_entries);
    pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num);
    write_unlock(&vpsp_rwlock);
@@ -270,12 +276,12 @@ static int vpsp_del_vid(void)

    write_lock(&vpsp_rwlock);
    for (i = 0; i < g_vpsp_vid_num; ++i) {
        if (g_vpsp_vid_array[i].pid == cur_pid) {
        if (g_vpsp_context_array[i].pid == cur_pid) {
            --g_vpsp_vid_num;
            pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n",
                    g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num);
            memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1],
                   sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i));
                    g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num);
            memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1],
                    sizeof(struct vpsp_context) * (g_vpsp_vid_num - i));
            ret = 0;
            goto end;
        }
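Note: the switch from memcpy to memmove matters here because the source and destination regions overlap whenever the deleted entry is not the last one, and memcpy on overlapping buffers is undefined behavior. In miniature:

#include <stdio.h>
#include <string.h>

int main(void)
{
    int a[5] = {1, 2, 3, 4, 5};

    /* delete a[1] by shifting the tail left; src and dst overlap */
    memmove(&a[1], &a[2], 3 * sizeof(int));

    for (int i = 0; i < 4; i++)
        printf("%d ", a[i]); /* prints: 1 3 4 5 */
    return 0;
}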
@@ -286,6 +292,85 @@ end:
    return ret;
}

static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end)
{
    pid_t cur_pid = task_pid_nr(current);
    struct vpsp_context *ctx = NULL;

    vpsp_get_context(&ctx, cur_pid);
    if (!ctx) {
        pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid);
        return -ENOENT;
    }

    ctx->gpa_start = gpa_start;
    ctx->gpa_end = gpa_end;
    pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n",
            gpa_start, gpa_end, cur_pid);
    return 0;
}

/**
 * Try to pin a page
 *
 * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE
 */
static int psp_pin_user_page(u64 vaddr)
{
    struct page *page;
    long npinned = 0;
    int ref_count = 0;

    // check must be aligned to PAGE_SIZE
    if (vaddr & (PAGE_SIZE - 1)) {
        pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE);
        return -EFAULT;
    }

    npinned = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, &page);
    if (npinned != 1) {
        pr_err("PSP: pin_user_pages_fast fail\n");
        return -ENOMEM;
    }

    ref_count = page_ref_count(page);
    pr_debug("pin user page with address %llx, page ref_count %d\n", vaddr, ref_count);
    return 0;
}

/**
 * Try to unpin a page
 *
 * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE
 */
static int psp_unpin_user_page(u64 vaddr)
{
    struct page *page;
    long npinned = 0;
    int ref_count = 0;

    // check must be aligned to PAGE_SIZE
    if (vaddr & (PAGE_SIZE - 1)) {
        pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE);
        return -EFAULT;
    }

    // page reference count increment by 1
    npinned = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page);
    if (npinned != 1) {
        pr_err("PSP: pin_user_pages_fast fail\n");
        return -ENOMEM;
    }

    // page reference count decrement by 2
    put_page(page);
    put_page(page);

    ref_count = page_ref_count(page);
    pr_debug("unpin user page with address %llx, page ref_count %d\n", vaddr, ref_count);
    return 0;
}

static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl)
{
    int ret = 0;
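Note: the pin handler takes the user virtual address directly as the ioctl argument and insists on page alignment; the unpin handler deliberately drops one extra reference (get_user_pages_fast takes one, and the two put_page calls release it plus the original pin). A hedged userspace sketch; the HYGON_PSP_IOC_* request macros are hypothetical:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int pin_unpin_demo(int psp_fd)
{
    void *buf = NULL;
    long page = sysconf(_SC_PAGESIZE);
    int ret;

    if (posix_memalign(&buf, page, page)) /* kernel rejects unaligned vaddrs */
        return -1;

    ret = ioctl(psp_fd, HYGON_PSP_IOC_PIN_USER_PAGE, (unsigned long)buf);
    if (ret == 0)
        ret = ioctl(psp_fd, HYGON_PSP_IOC_UNPIN_USER_PAGE, (unsigned long)buf);

    free(buf);
    return ret;
}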
@@ -308,6 +393,10 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl)
        ctrl->data.def_vid_perm = vpsp_get_default_vid_permission();
        break;

    case VPSP_OP_SET_GPA:
        ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end);
        break;

    default:
        ret = -EINVAL;
        break;
@@ -364,6 +453,14 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg)
            return -EFAULT;
        break;

    case HYGON_PSP_OP_PIN_USER_PAGE:
        ret = psp_pin_user_page((u64)arg);
        break;

    case HYGON_PSP_OP_UNPIN_USER_PAGE:
        ret = psp_unpin_user_page((u64)arg);
        break;

    default:
        printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode);
        return -EINVAL;
@@ -507,100 +604,6 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret)
    return ret;
}

int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret)
{
    struct psp_device *psp = psp_master;
    struct sev_device *sev;
    phys_addr_t phys_addr;
    unsigned int phys_lsb, phys_msb;
    unsigned int reg, ret = 0;

    if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed)
        return -ENODEV;

    if (*hygon_psp_hooks.psp_dead)
        return -EBUSY;

    sev = psp->sev_data;

    if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
        return -EINVAL;

    /* Get the physical address of the command buffer */
    phys_addr = PUT_PSP_VID(__psp_pa(data), vid);
    phys_lsb = data ? lower_32_bits(phys_addr) : 0;
    phys_msb = data ? upper_32_bits(phys_addr) : 0;

    dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
            cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout);

    print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
                         hygon_psp_hooks.sev_cmd_buffer_len(cmd), false);

    iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
    iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

    sev->int_rcvd = 0;

    reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
    iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

    /* wait for command completion */
    ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout);
    if (ret) {
        if (psp_ret)
            *psp_ret = 0;

        dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
        *hygon_psp_hooks.psp_dead = true;

        return ret;
    }

    *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout;

    if (psp_ret)
        *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);

    if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
        dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
                cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
        ret = -EIO;
    }

    print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
                         hygon_psp_hooks.sev_cmd_buffer_len(cmd), false);

    return ret;
}

int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret)
{
    int rc;
    int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);

    if (!hygon_psp_hooks.sev_dev_hooks_installed)
        return -ENODEV;

    if (mutex_enabled) {
        if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex,
                                   PSP_MUTEX_TIMEOUT) != 1) {
            return -EBUSY;
        }
    } else {
        mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
    }

    rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret);

    if (is_vendor_hygon() && mutex_enabled)
        psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex);
    else
        mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);

    return rc;
}

int psp_do_cmd(int cmd, void *data, int *psp_ret)
{
    int rc;
@@ -13,399 +13,103 @@
#include <linux/psp-sev.h>
#include <linux/psp.h>
#include <linux/psp-hygon.h>
#include <asm/cpuid.h>

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "vpsp: " fmt
#define VTKM_VM_BIND 0x904

/*
 * The file mainly implements the base execution
 * logic of virtual PSP in kernel mode, which mainly includes:
 * (1) Obtain the VM command and preprocess the pointer
 *     mapping table information in the command buffer
 * (2) The command that has been converted will interact
 *     with the channel of the psp through the driver and
 *     try to obtain the execution result
 * (3) The executed command data is recovered according to
 *     the multilevel pointer of the mapping table, and then returned to the VM
 * The file mainly implements the base execution logic of virtual PSP in kernel mode,
 * which mainly includes:
 * (1) Preprocess the guest data in the host kernel
 * (2) The command that has been converted will interact with the channel of the
 *     psp through the driver and try to obtain the execution result
 * (3) The executed command data is recovered, and then returned to the VM
 *
 * The primary implementation logic of virtual PSP in kernel mode
 * call trace:
 * guest command(vmmcall)
 *       |
 *       | |-> kvm_pv_psp_cmd_pre_op
 *       | |
 *       | |    -> guest_addr_map_table_op
 *       | |
 *       | |    -> guest_multiple_level_gpa_replace
 * guest command(vmmcall, KVM_HC_PSP_COPY_FORWARD_OP)
 *       |
 * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver
 *       |
 *       |
 *       |-> kvm_pv_psp_cmd_post_op
 * kvm_pv_psp_copy_op----> | -> kvm_pv_psp_cmd_pre_op
 *       |
 *       | -> guest_addr_map_table_op
 *       |
 *       | -> guest_multiple_level_gpa_restore
 *       | -> vpsp_try_do_cmd/vpsp_try_get_result
 *       |               |<=> psp device driver
 *       |
 *       |
 *       |-> kvm_pv_psp_cmd_post_op
 *
 * guest command(vmmcall, KVM_HC_PSP_FORWARD_OP)
 *       |
 * kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result
 *                                       |<=> psp device driver
 */

#define TKM_CMD_ID_MIN 0x120
#define TKM_CMD_ID_MAX 0x12f

struct psp_cmdresp_head {
    uint32_t buf_size;
    uint32_t cmdresp_size;
    uint32_t cmdresp_code;
} __packed;

/**
 * struct map_tbl - multilevel pointer address mapping table
 *
 * @parent_pa: parent address block's physics address
 * @offset: offset in parent address block
 * @size: submemory size
 * @align: submemory align size, hva need to keep size alignment in kernel
 * @hva: submemory copy block in kernel virtual address
 */
struct map_tbl {
    uint64_t parent_pa;
    uint32_t offset;
    uint32_t size;
    uint32_t align;
    uint64_t hva;
} __packed;

struct addr_map_tbls {
    uint32_t tbl_nums;
    struct map_tbl tbl[];
} __packed;

/* gpa and hva conversion maintenance table for internal use */
struct gpa2hva_t {
    void *hva;
    gpa_t gpa;
};

struct gpa2hva_tbls {
    uint32_t max_nums;
    uint32_t tbl_nums;
    struct gpa2hva_t tbl[];
};

/* save command data for restoring later */
struct vpsp_hbuf_wrapper {
    void *data;
    uint32_t data_size;
    struct addr_map_tbls *map_tbls;
    struct gpa2hva_tbls *g2h_tbls;
};

/* Virtual PSP host memory information maintenance, used in ringbuffer mode */
struct vpsp_hbuf_wrapper
g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0};

void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls)
static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size)
{
    int i;

    pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums);
    for (i = 0; i < tbls->tbl_nums; i++) {
        pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx",
                i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset,
                tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva);
    }
    pr_info("\n");
}

void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls)
{
    int i;

    pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums,
            tbls->max_nums);
    for (i = 0; i < tbls->tbl_nums; i++)
        pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i,
                (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa);
    pr_info("\n");
}

static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa)
{
    uint32_t fill_idx = tbls->tbl_nums;

    if (fill_idx >= tbls->max_nums)
    if (!vpsp_ctx || !addr)
        return -EFAULT;

    tbls->tbl[fill_idx].hva = hva;
    tbls->tbl[fill_idx].gpa = gpa;
    tbls->tbl_nums = fill_idx + 1;
    if (addr >= vpsp_ctx->gpa_start && (addr + size) <= vpsp_ctx->gpa_end)
        return 0;
    return -EFAULT;
}

static int check_psp_mem_range(struct vpsp_context *vpsp_ctx,
                               void *data, uint32_t size)
{
    if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) !=
        ((uintptr_t)data & ~PSP_2MB_MASK)) {
        pr_err("data %llx, size %d crossing 2MB\n", (u64)data, size);
        return -EFAULT;
    }

    if (vpsp_ctx)
        return check_gpa_range(vpsp_ctx, (gpa_t)data, size);

    return 0;
}

static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva)
{
    int i;

    for (i = 0; i < g2h->tbl_nums; i++) {
        if (g2h->tbl[i].hva == hva)
            g2h->tbl[i].hva = NULL;
    }
}

static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa)
{
    int i;

    for (i = 0; i < g2h->tbl_nums; i++) {
        if (g2h->tbl[i].gpa == gpa)
            return (void *)g2h->tbl[i].hva;
    }

    return NULL;
}

static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva)
{
    int i;

    for (i = 0; i < g2h->tbl_nums; i++) {
        if (g2h->tbl[i].hva == hva)
            return g2h->tbl[i].gpa;
    }

    return 0;
}

/*
 * The virtual machine multilevel pointer command buffer handles the
 * execution entity, synchronizes the data in the original gpa to the
 * newly allocated hva(host virtual address) and updates the mapping
 * relationship in the parent memory
 */
static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp,
                                            struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
{
    int ret = 0;
    uint32_t sub_block_size;
    uint64_t sub_paddr;
    void *parent_kva = NULL;

    /* kmalloc memory for child block */
    sub_block_size = max(tbl->size, tbl->align);
    tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL);
    if (!tbl->hva)
        return -ENOMEM;

    /* get child gpa from parent gpa */
    if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset,
                                  &sub_paddr, sizeof(sub_paddr)))) {
        pr_err("[%s]: kvm_read_guest for parent gpa failed\n",
               __func__);
        ret = -EFAULT;
        goto e_free;
    }

    /* copy child block data from gpa to hva */
    if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva,
                                  tbl->size))) {
        pr_err("[%s]: kvm_read_guest for sub_data failed\n",
               __func__);
        ret = -EFAULT;
        goto e_free;
    }

    /* get hva from gpa */
    parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa);
    if (unlikely(!parent_kva)) {
        pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
               __func__);
        ret = -EFAULT;
        goto e_free;
    }

    /* replace pa of hva from gpa */
    *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva);

    /* fill in gpa and hva to map table for restoring later */
    if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) {
        pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n",
               __func__);
        ret = -EFAULT;
        goto e_free;
    }

    return ret;

e_free:
    kfree((const void *)tbl->hva);
    return ret;
}

/* The virtual machine multi-level pointer command memory handles the
 * execution entity, synchronizes the data in the hva(host virtual
 * address) back to the memory corresponding to the gpa, and restores
 * the mapping relationship in the original parent memory
 */
static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp,
                                            struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
{
    int ret = 0;
    gpa_t sub_gpa;
    void *parent_hva = NULL;

    /* get gpa from hva */
    sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva);
    if (unlikely(!sub_gpa)) {
        pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n",
               __func__);
        ret = -EFAULT;
        goto end;
    }

    /* copy child block data from hva to gpa */
    if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva,
                                   tbl->size))) {
        pr_err("[%s]: kvm_write_guest for sub_gpa failed\n",
               __func__);
        ret = -EFAULT;
        goto end;
    }

    /* get parent hva from parent gpa */
    parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa);
    if (unlikely(!parent_hva)) {
        pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
               __func__);
        ret = -EFAULT;
        goto end;
    }

    /* restore gpa from pa of hva in parent block */
    *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa;

    /* free child block memory */
    clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva);
    kfree((const void *)tbl->hva);
    tbl->hva = 0;

end:
    return ret;
}

/*
 * The virtual machine multilevel pointer command memory processing
 * executes upper-layer abstract interfaces, including replacing and
 * restoring two sub-processing functions
 */
static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h,
                                   struct addr_map_tbls *map_tbls, int op)
{
    int ret = 0;
    int i;
    uint64_t *sub_paddr_ptr;

    if (op) {
        for (i = map_tbls->tbl_nums - 1; i >= 0; i--) {
            /* check if the gpa of root points to itself */
            if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
                sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
                                + map_tbls->tbl[i].offset);
                /* if the child paddr is equal to the parent paddr */
                if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) {
                    *sub_paddr_ptr = g2h->tbl[0].gpa;
                    continue;
                }
            }

            /* restore new pa of kva with the gpa from guest */
            if (unlikely(guest_multiple_level_gpa_restore(vpsp,
                         &map_tbls->tbl[i], g2h))) {
                pr_err("[%s]: guest_multiple_level_gpa_restore failed\n",
                       __func__);
                ret = -EFAULT;
                goto end;
            }
        }
    } else {
        for (i = 0; i < map_tbls->tbl_nums; i++) {
            /* check if the gpa of root points to itself */
            if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
                sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
                                + map_tbls->tbl[i].offset);
                /* if the child paddr is equal to the parent paddr */
                if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) {
                    *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva);
                    map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva;
                    continue;
                }
            }

            /* check if parent_pa is valid */
            if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) {
                pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n",
                       __func__, i, map_tbls->tbl[i].parent_pa);
                ret = -EFAULT;
                goto end;
            }

            /* replace the gpa from guest with the new pa of kva */
            if (unlikely(guest_multiple_level_gpa_replace(vpsp,
                         &map_tbls->tbl[i], g2h))) {
                pr_err("[%s]: guest_multiple_level_gpa_replace failed\n",
                       __func__);
                ret = -EFAULT;
                goto end;
            }
        }
    }

end:
    return ret;
}

static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls
                                *map_tbl, void *data)
{
    int i;

    if (g2h) {
        for (i = 0; i < g2h->tbl_nums; i++) {
            if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) {
                kfree(g2h->tbl[i].hva);
                g2h->tbl[i].hva = NULL;
            }
        }
        kfree(g2h);
    }

    kfree(map_tbl);
    kfree(data);
}

/*
 * Obtain the VM command and preprocess the pointer mapping table
 * information in the command buffer, the processed data will be
 * used to interact with the psp device
/**
 * Copy the guest data to the host kernel buffer
 * and record the host buffer address in 'hbuf'.
 * This 'hbuf' is used to restore context information
 * during asynchronous processing.
 */
static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
                                 gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf)
                                 struct vpsp_hbuf_wrapper *hbuf)
{
    int ret = 0;
    void *data = NULL;
    struct psp_cmdresp_head psp_head;
    uint32_t data_size;
    struct addr_map_tbls map_head, *map_tbls = NULL;
    uint32_t map_tbl_size;
    struct gpa2hva_tbls *g2h = NULL;
    uint32_t g2h_tbl_size;

    if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head,
                                  sizeof(struct psp_cmdresp_head))))
        return -EFAULT;

    data_size = psp_head.buf_size;
    if (check_psp_mem_range(NULL, (void *)data_gpa, data_size))
        return -EFAULT;

    data = kzalloc(data_size, GFP_KERNEL);
    if (!data)
        return -ENOMEM;
@@ -415,87 +119,18 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
        goto end;
    }

    if (table_gpa) {
        /* parse address map table from guest */
        if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head,
                                      sizeof(struct addr_map_tbls)))) {
            pr_err("[%s]: kvm_read_guest for map_head failed\n",
                   __func__);
            ret = -EFAULT;
            goto end;
        }

        map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums
                       * sizeof(struct map_tbl);
        map_tbls = kzalloc(map_tbl_size, GFP_KERNEL);
        if (!map_tbls) {
            ret = -ENOMEM;
            goto end;
        }

        if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls,
                                      map_tbl_size))) {
            pr_err("[%s]: kvm_read_guest for map_tbls failed\n",
                   __func__);
            ret = -EFAULT;
            goto end;
        }

        /* init for gpa2hva table*/
        g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums
                       + 1) * sizeof(struct gpa2hva_t);
        g2h = kzalloc(g2h_tbl_size, GFP_KERNEL);
        if (!g2h) {
            ret = -ENOMEM;
            goto end;
        }
        g2h->max_nums = map_head.tbl_nums + 1;

        /* fill the root parent address */
        if (gpa2hva_tbl_fill(g2h, data, data_gpa)) {
            pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n",
                   __func__);
            ret = -EFAULT;
            goto end;
        }

        if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) {
            pr_err("[%s]: guest_addr_map_table_op for replacing failed\n",
                   __func__);
            ret = -EFAULT;
            goto end;
        }
    }

    hbuf->data = data;
    hbuf->data_size = data_size;
    hbuf->map_tbls = map_tbls;
    hbuf->g2h_tbls = g2h;

end:
    return ret;
}

/*
 * The executed command data is recovered according to the multilevel
 * pointer of the mapping table when the command has finished
 * interacting with the psp device
 */
static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
                                  struct vpsp_hbuf_wrapper *hbuf)
                                  struct vpsp_hbuf_wrapper *hbuf)
{
    int ret = 0;

    if (hbuf->map_tbls) {
        if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls,
                                    hbuf->map_tbls, 1)) {
            pr_err("[%s]: guest_addr_map_table_op for restoring failed\n",
                   __func__);
            ret = -EFAULT;
            goto end;
        }
    }

    /* restore cmdresp's buffer from context */
    if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data,
                                   hbuf->data_size))) {
@@ -504,12 +139,9 @@ static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
        ret = -EFAULT;
        goto end;
    }

end:
    /* release memory and clear hbuf */
    kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data);
    kfree(hbuf->data);
    memset(hbuf, 0, sizeof(*hbuf));

    return ret;
}
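Note: both of the new entry points below expect the guest request buffer to begin with struct psp_cmdresp_head, since head.buf_size drives the range checks and the copy size. A sketch of a plausible guest-side layout; the payload size and the helper are hypothetical, only the common header comes from this patch.

struct tkm_demo_req {
    struct psp_cmdresp_head head; /* buf_size / cmdresp_size / cmdresp_code */
    uint8_t payload[64];          /* hypothetical command-specific body */
} __packed;

static void tkm_demo_req_init(struct tkm_demo_req *req, uint32_t code)
{
    req->head.buf_size = sizeof(*req);     /* must stay inside the gpa window */
    req->head.cmdresp_size = sizeof(*req);
    req->head.cmdresp_code = code;
}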
@@ -520,38 +152,325 @@ static int cmd_type_is_tkm(int cmd)
    return 0;
}

/*
 * The primary implementation interface of virtual PSP in kernel mode
static int cmd_type_is_allowed(int cmd)
{
    if (cmd >= TKM_PSP_CMDID_OFFSET && cmd <= TKM_CMD_ID_MAX)
        return 1;
    return 0;
}

struct psp_cmdresp_vtkm_vm_bind {
    struct psp_cmdresp_head head;
    uint16_t vid;
    uint32_t vm_handle;
    uint8_t reserved[46];
} __packed;

static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint32_t *pret)
{
    int ret = 0;
    struct psp_cmdresp_vtkm_vm_bind *data;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    data->head.buf_size = sizeof(*data);
    data->head.cmdresp_size = sizeof(*data);
    data->head.cmdresp_code = VTKM_VM_BIND;
    data->vid = vid;
    data->vm_handle = vm_handle;

    ret = psp_do_cmd(cmd_id, data, pret);
    if (ret == -EIO)
        ret = 0;

    kfree(data);
    return ret;
}

static unsigned long vpsp_get_me_mask(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned long me_mask;

#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
    /*
     * Check for the SME/SEV feature:
     * CPUID Fn8000_001F[EAX]
     * - Bit 0 - Secure Memory Encryption support
     * - Bit 1 - Secure Encrypted Virtualization support
     * CPUID Fn8000_001F[EBX]
     * - Bits 5:0 - Pagetable bit position used to indicate encryption
     */
    eax = 0x8000001f;
    ecx = 0;
    native_cpuid(&eax, &ebx, &ecx, &edx);
    /* Check whether SEV or SME is supported */
    if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
        return 0;

    me_mask = 1UL << (ebx & 0x3f);
    return me_mask;
}

static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa)
{
    phys_addr_t hpa = 0;
    unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT);
    unsigned long me_mask = sme_get_me_mask();
    struct page *page;

    if (me_mask == 0 && vpsp->is_csv_guest)
        me_mask = vpsp_get_me_mask();

    if (!is_error_pfn(pfn))
        hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | me_mask;
    else {
        pr_err("[%s] pfn: %lx is invalid, gpa %lx",
               __func__, pfn, data_gpa);
        return 0;
    }

    /*
     * Using gfn_to_pfn causes the refcount to increment
     * atomically by one, which needs to be released.
     */
    page = pfn_to_page(pfn);
    if (PageCompound(page))
        page = compound_head(page);

    put_page(page);

    pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa);
    return hpa;

}

static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
                                           uint64_t data, uint32_t cmd)
{
    int ret;
    struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
    struct psp_cmdresp_head psp_head;

    if (!cmd_type_is_allowed(vcmd->cmd_id)) {
        pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
        return -EINVAL;
    }

    if (vpsp->is_csv_guest) {
        /**
         * If the gpa address range exists,
         * it means there must be a legal vid
         */
        if (!vpsp_ctx || !vpsp_ctx->gpa_start || !vpsp_ctx->gpa_end) {
            pr_err("[%s]: No set gpa range or vid in csv guest\n", __func__);
            return -EPERM;
        }

        ret = check_psp_mem_range(vpsp_ctx, (void *)data, 0);
        if (ret)
            return -EFAULT;
    } else {
        if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
            && !vpsp_get_default_vid_permission()) {
            pr_err("[%s]: not allowed tkm command without vid\n", __func__);
            return -EPERM;
        }

        // the 'data' is gpa address
        if (unlikely(vpsp->read_guest(vpsp->kvm, data, &psp_head,
                                      sizeof(struct psp_cmdresp_head))))
            return -EFAULT;

        ret = check_psp_mem_range(vpsp_ctx, (void *)data, psp_head.buf_size);
        if (ret)
            return -EFAULT;
    }
    return 0;
}

static int
check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp,
                                     struct vpsp_context *vpsp_ctx,
                                     uint64_t data, uint32_t cmd)
{
    int ret = 0;
    struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;

    if (!cmd_type_is_allowed(vcmd->cmd_id)) {
        pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id);
        return -EINVAL;
    }

    if (vpsp->is_csv_guest) {
        pr_err("[%s]: unsupported run on csv guest\n", __func__);
        ret = -EPERM;
    } else {
        if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id)
            && !vpsp_get_default_vid_permission()) {
            pr_err("[%s]: not allowed tkm command without vid\n", __func__);
            ret = -EPERM;
        }
    }
    return ret;
}

static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx,
                              uint32_t cmd, uint32_t *psp_ret)
{
    int ret;
    struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;

    if (vpsp_ctx && !vpsp_ctx->vm_is_bound && vpsp->is_csv_guest) {
        ret = kvm_bind_vtkm(vpsp->vm_handle, vcmd->cmd_id,
                            vpsp_ctx->vid, psp_ret);
        if (ret || *psp_ret) {
            pr_err("[%s] kvm bind vtkm failed with ret: %d, pspret: %d\n",
                   __func__, ret, *psp_ret);
            return ret;
        }
        vpsp_ctx->vm_is_bound = 1;
    }
    return 0;
}

/**
 * @brief Directly convert the gpa address into hpa and forward it to PSP,
 * It is another form of kvm_pv_psp_copy_op, mainly used for csv VMs.
 *
 * @param vpsp points to kvm related data
 * @param cmd psp cmd id, bit 31 indicates queue priority
 * @param data_gpa guest physical address of input data
 * @param psp_ret indicates Asynchronous context information
 *
 * Since the csv guest memory cannot be read or written directly,
 * the shared asynchronous context information is shared through psp_ret and return value.
 */
int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
                  gpa_t table_gpa)
int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
                          gpa_t data_gpa, uint32_t psp_ret)
{
    int ret;
    uint64_t data_hpa;
    uint32_t index = 0, vid = 0;
    struct vpsp_ret psp_async = {0};
    struct vpsp_context *vpsp_ctx = NULL;
    struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
    uint8_t prio = CSV_COMMAND_PRIORITY_LOW;
    phys_addr_t hpa;

    vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid);

    ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd);
    if (unlikely(ret)) {
        pr_err("directly operation not allowed\n");
        goto end;
    }

    ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async);
    if (unlikely(ret || *(uint32_t *)&psp_async)) {
        pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n",
               ret, *(uint32_t *)&psp_async);
        goto end;
    }

    if (vpsp_ctx)
        vid = vpsp_ctx->vid;

    *((uint32_t *)&psp_async) = psp_ret;

    hpa = gpa_to_hpa(vpsp, data_gpa);
    if (unlikely(!hpa)) {
        ret = -EFAULT;
        goto end;
    }

    data_hpa = PUT_PSP_VID(hpa, vid);

    switch (psp_async.status) {
    case VPSP_INIT:
        /* try to send command to the device for execution*/
        ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async);
        if (unlikely(ret)) {
            pr_err("[%s]: vpsp_do_cmd failed\n", __func__);
            goto end;
        }
        break;

    case VPSP_RUNNING:
        prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
                                  CSV_COMMAND_PRIORITY_LOW;
        index = psp_async.index;
        /* try to get the execution result from ringbuffer*/
        ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async);
        if (unlikely(ret)) {
            pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
            goto end;
        }
        break;

    default:
        pr_err("[%s]: invalid command status\n", __func__);
        break;
    }

end:
    /**
     * In order to indicate both system errors and PSP errors,
     * the psp_async.pret field needs to be reused.
     */
    psp_async.format = VPSP_RET_PSP_FORMAT;
    if (ret) {
        psp_async.format = VPSP_RET_SYS_FORMAT;
        if (ret > 0)
            ret = -ret;
        psp_async.pret = (uint16_t)ret;
    }
    return *((int *)&psp_async);
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_forward_op);

/**
 * @brief copy data in gpa to host memory and send it to psp for processing.
 *
 * @param vpsp points to kvm related data
 * @param cmd psp cmd id, bit 31 indicates queue priority
 * @param data_gpa guest physical address of input data
 * @param psp_ret_gpa guest physical address of psp_ret
 */
int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa)
{
    int ret = 0;
    struct vpsp_ret psp_ret = {0};
    struct vpsp_hbuf_wrapper hbuf = {0};
    struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
    struct vpsp_context *vpsp_ctx = NULL;
    phys_addr_t data_paddr = 0;
    uint8_t prio = CSV_COMMAND_PRIORITY_LOW;
    uint32_t index = 0;
    uint32_t vid = 0;

    // only tkm cmd need vid
    if (cmd_type_is_tkm(vcmd->cmd_id)) {
        // check the permission to use the default vid when no vid is set
        ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid);
        if (ret && !vpsp_get_default_vid_permission()) {
            pr_err("[%s]: not allowed tkm command without vid\n", __func__);
            return -EFAULT;
        }
    vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid);

    ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd);
    if (unlikely(ret)) {
        pr_err("copy operation not allowed\n");
        return -EPERM;
    }

    if (vpsp_ctx)
        vid = vpsp_ctx->vid;

    if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret,
                                  sizeof(psp_ret))))
        return -EFAULT;

    switch (psp_ret.status) {
    case VPSP_INIT:
        /* multilevel pointer replace*/
        ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf);
        /* copy data from guest */
        ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf);
        if (unlikely(ret)) {
            psp_ret.status = VPSP_FINISH;
            pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n",
@@ -560,25 +479,22 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
            goto end;
        }

        data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid);
        /* try to send command to the device for execution*/
        ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data,
                              (struct vpsp_ret *)&psp_ret);
        ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret);
        if (unlikely(ret)) {
            pr_err("[%s]: vpsp_do_cmd failed\n", __func__);
            pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__);
            ret = -EFAULT;
            goto end;
        }

        switch (psp_ret.status) {
        case VPSP_RUNNING:
            /* backup host memory message for restoring later*/
        if (psp_ret.status == VPSP_RUNNING) {
            prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
                                      CSV_COMMAND_PRIORITY_LOW;
            g_hbuf_wrap[prio][psp_ret.index] = hbuf;
            break;

        case VPSP_FINISH:
            /* restore multilevel pointer data */
        } else if (psp_ret.status == VPSP_FINISH) {
            ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf);
            if (unlikely(ret)) {
                pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
@@ -586,11 +502,6 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
                ret = -EFAULT;
                goto end;
            }
            break;

        default:
            ret = -EFAULT;
            break;
        }
        break;
@@ -598,35 +509,31 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_
        prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
                                  CSV_COMMAND_PRIORITY_LOW;
        index = psp_ret.index;
        data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid);
        /* try to get the execution result from ringbuffer*/
        ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data,
                                  (struct vpsp_ret *)&psp_ret);
        ret = vpsp_try_get_result(prio, index, data_paddr,
                                  (struct vpsp_ret *)&psp_ret);
        if (unlikely(ret)) {
            pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
            ret = -EFAULT;
            goto end;
        }

        switch (psp_ret.status) {
        case VPSP_RUNNING:
            break;

        case VPSP_FINISH:
            /* restore multilevel pointer data */
        if (psp_ret.status == VPSP_RUNNING) {
            ret = 0;
            goto end;
        } else if (psp_ret.status == VPSP_FINISH) {
            /* copy data to guest */
            ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa,
                                         &g_hbuf_wrap[prio][index]);
            if (unlikely(ret)) {
                pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
                       __func__);
                ret = -EFAULT;
                goto end;
            }
            break;

        default:
            ret = -EFAULT;
            break;
            goto end;
        }
        ret = -EFAULT;
        break;

    default:
@@ -638,4 +545,5 @@ end:
    /* return psp_ret to guest */
    vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
    return ret;
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_op);
}
EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op);
@@ -433,31 +433,54 @@ struct vpsp_cmd {
 *
 * @pret: the return code from device
 * @resv: reserved bits
 * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1)
 * @index: used to distinguish the position of command in the ringbuffer
 * @status: indicates the current status of the related command
 */
struct vpsp_ret {
    u32 pret : 16;
    u32 resv : 2;
    u32 resv : 1;
    u32 format : 1;
    u32 index : 12;
    u32 status : 2;
};
#define VPSP_RET_SYS_FORMAT 1
#define VPSP_RET_PSP_FORMAT 0

struct kvm_vpsp {
    struct kvm *kvm;
    int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len);
    int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
    kvm_pfn_t (*gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
    u32 vm_handle;
    u8 is_csv_guest;
};

#define PSP_2MB_MASK (2*1024*1024 - 1)
#define PSP_HUGEPAGE_2MB (2*1024*1024)
#define PSP_HUGEPAGE_NUM_MAX 128
#define TKM_CMD_ID_MIN 0x120
#define TKM_CMD_ID_MAX 0x12f
#define TKM_PSP_CMDID TKM_CMD_ID_MIN
#define TKM_PSP_CMDID_OFFSET 0x128
#define PSP_VID_MASK 0xff
#define PSP_VID_SHIFT 56
#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT))
#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK)
#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT))

#ifdef CONFIG_CRYPTO_DEV_SP_PSP
struct vpsp_context {
    u32 vid;
    pid_t pid;
    u64 gpa_start;
    u64 gpa_end;

int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret);
    // `vm_is_bound` indicates whether the binding operation has been performed
    u32 vm_is_bound;
    u32 vm_handle; // only for csv
};

#ifdef CONFIG_CRYPTO_DEV_SP_PSP

int psp_do_cmd(int cmd, void *data, int *psp_ret);
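Note: the VID rides in bits 63:56 of the command buffer address, which works because physical addresses on these parts do not occupy the top byte. A quick round trip with hypothetical values:

static void vid_tag_demo(void)
{
    __u64 hpa = 0x123456789000ULL;      /* hypothetical buffer address */
    __u64 tagged = PUT_PSP_VID(hpa, 5); /* 0x0500123456789000 */

    WARN_ON(GET_PSP_VID(tagged) != 5);
    WARN_ON(CLEAR_PSP_VID(tagged) != hpa);
}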
@@ -472,20 +495,20 @@ int csv_check_stat_queue_status(int *psp_ret);
 */
int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);

int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index,
                        void *data, struct vpsp_ret *psp_ret);
int vpsp_try_get_result(uint8_t prio, uint32_t index,
                        phys_addr_t phy_addr, struct vpsp_ret *psp_ret);

int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret);
int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret);

int vpsp_get_vid(uint32_t *vid, pid_t pid);
int vpsp_get_context(struct vpsp_context **ctx, pid_t pid);

int vpsp_get_default_vid_permission(void);

int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
                  gpa_t table_gpa);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa);

static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; }
int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
                          gpa_t data_gpa, uint32_t psp_ret);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */

static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
@@ -498,22 +521,31 @@ static inline int
csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }

static inline int
vpsp_try_get_result(uint32_t vid, uint8_t prio,
                    uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
vpsp_try_get_result(uint8_t prio,
                    uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; }

static inline int
vpsp_try_do_cmd(uint32_t vid, int cmd,
                void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }

static inline int
vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; }
vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr,
                struct vpsp_ret *psp_ret) { return -ENODEV; }

static inline int
vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; }

static inline int
vpsp_get_default_vid_permission(void) { return -ENODEV; }

static inline int
kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
              gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; }
kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
                           gpa_t psp_ret_gpa) { return -ENODEV; }

static inline int
kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd,
                      gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; }

#endif /* CONFIG_CRYPTO_DEV_SP_PSP */

typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data);
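Note: struct vpsp_ret is sized to exactly 32 bits (16 + 1 + 1 + 12 + 2) so the whole asynchronous state fits in a hypercall return value, which is why kvm_pv_psp_forward_op puns it to and from a u32. A minimal sketch of the packing, assuming the driver's VPSP_RUNNING status value:

static uint32_t pack_async_state(uint16_t index)
{
    struct vpsp_ret st = {0};

    st.status = VPSP_RUNNING; /* assumed status enum from the vpsp driver */
    st.index = index;         /* ringbuffer slot to poll on the next call */
    return *(uint32_t *)&st;  /* same type punning the forward path uses */
}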
|
@ -31,7 +31,9 @@
|
|||
#define KVM_HC_SCHED_YIELD 11
|
||||
#define KVM_HC_MAP_GPA_RANGE 12
|
||||
#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */
|
||||
#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */
|
||||
#define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */
|
||||
#define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */
|
||||
#define KVM_HC_PSP_FORWARD_OP 103 /* Specific to Hygon platform */
|
||||
|
||||
/*
|
||||
* hypercalls use architecture specific
|
||||
|
|