Merge tag 'x86_urgent_for_v6.3_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "There's a little bit more 'movement' in there for my taste but it
  needs to happen and should make the code better after it.

  - Check cmdline_find_option()'s return value before further processing

  - Clear temporary storage in the resctrl code to prevent access to a
    nonexistent MSR

  - Add a simple throttling mechanism to protect the hypervisor from
    potentially malicious SEV guests issuing requests in rapid
    succession.

    In order not to jeopardize the sanity of everyone involved in
    maintaining this code, the request-issuing side has received a
    cleanup, split into more or less trivial, small and digestible
    pieces; otherwise, the code was threatening to become an
    unmaintainable mess. That cleanup is therefore also marked
    indirectly for stable so that there are no differences between the
    upstream code and the stable variant when it comes to backporting
    more fixes there"

* tag 'x86_urgent_for_v6.3_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix use of uninitialized buffer in sme_enable()
  x86/resctrl: Clear staged_config[] before and after it is used
  virt/coco/sev-guest: Add throttling awareness
  virt/coco/sev-guest: Convert the sw_exit_info_2 checking to a switch-case
  virt/coco/sev-guest: Do some code style cleanups
  virt/coco/sev-guest: Carve out the request issuing logic into a helper
  virt/coco/sev-guest: Remove the disable_vmpck label in handle_guest_request()
  virt/coco/sev-guest: Simplify extended guest request handling
  virt/coco/sev-guest: Check SEV_SNP attribute at probe time
commit 4ac39c5910
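For context on the throttling bullet above: the series makes the guest driver retry a throttled request for a bounded time instead of failing right away, so the message sequence number is not reused for a different message. The standalone sketch below is only a userspace illustration of that retry policy, assuming clock_gettime()/nanosleep() as stand-ins for jiffies and schedule_timeout_killable(); issue_request() is an invented placeholder for the firmware call, not the driver code itself (the real change is in the sev-guest hunks further down).

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    #define MAX_RETRY_SECONDS   60  /* mirrors SNP_REQ_MAX_RETRY_DURATION (60*HZ) */
    #define RETRY_DELAY_SECONDS 2   /* mirrors SNP_REQ_RETRY_DELAY (2*HZ) */

    /* Hypothetical request that is "throttled" (-EAGAIN) a few times. */
    static int issue_request(void)
    {
            static int throttled = 3;

            return throttled-- > 0 ? -EAGAIN : 0;
    }

    /* Retry a throttled request until it goes through or a deadline expires. */
    static int issue_with_retry(void)
    {
            struct timespec start, now, delay = { .tv_sec = RETRY_DELAY_SECONDS };
            int rc;

            clock_gettime(CLOCK_MONOTONIC, &start);

            for (;;) {
                    rc = issue_request();
                    if (rc != -EAGAIN)
                            return rc;

                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec - start.tv_sec > MAX_RETRY_SECONDS)
                            return -ETIMEDOUT;

                    nanosleep(&delay, NULL);  /* back off before retrying */
            }
    }

    int main(void)
    {
            printf("request result: %d\n", issue_with_retry());
            return 0;
    }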
@@ -128,8 +128,9 @@ struct snp_psc_desc {
 	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
 } __packed;
 
-/* Guest message request error code */
+/* Guest message request error codes */
 #define SNP_GUEST_REQ_INVALID_LEN	BIT_ULL(32)
+#define SNP_GUEST_REQ_ERR_BUSY		BIT_ULL(33)
 
 #define GHCB_MSR_TERM_REQ		0x100
 #define GHCB_MSR_TERM_REASON_SET_POS	12
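The two BIT_ULL() values above sit above the low 32 bits of the GHCB's sw_exit_info_2 field, which (as this code uses it) carry the SNP firmware's own status for the request; the convention here appears to be that bits 32 and 33 flag hypervisor-side conditions (certificate buffer too small, request throttled). A hedged userspace sketch of decoding such a combined value — the helper names are invented for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)                (1ULL << (n))
    #define SNP_GUEST_REQ_INVALID_LEN BIT_ULL(32)
    #define SNP_GUEST_REQ_ERR_BUSY    BIT_ULL(33)

    /* Hypothetical decode helpers, not kernel API. */
    static unsigned int fw_status(uint64_t exitinfo2)
    {
            return (uint32_t)exitinfo2;  /* low 32 bits: firmware status */
    }

    static const char *hv_condition(uint64_t exitinfo2)
    {
            if (exitinfo2 & SNP_GUEST_REQ_ERR_BUSY)
                    return "request throttled by the host";
            if (exitinfo2 & SNP_GUEST_REQ_INVALID_LEN)
                    return "certificate buffer too small";
            return "none";
    }

    int main(void)
    {
            uint64_t samples[] = { 0, SNP_GUEST_REQ_ERR_BUSY,
                                   SNP_GUEST_REQ_INVALID_LEN | 0x16 };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("exitinfo2=%#llx fw_status=%u condition=%s\n",
                           (unsigned long long)samples[i], fw_status(samples[i]),
                           hv_condition(samples[i]));
            return 0;
    }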
@@ -368,7 +368,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 {
 	struct resctrl_schema *s;
 	struct rdtgroup *rdtgrp;
-	struct rdt_domain *dom;
 	struct rdt_resource *r;
 	char *tok, *resname;
 	int ret = 0;
@@ -397,10 +396,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		goto out;
 	}
 
-	list_for_each_entry(s, &resctrl_schema_all, list) {
-		list_for_each_entry(dom, &s->res->domains, list)
-			memset(dom->staged_config, 0, sizeof(dom->staged_config));
-	}
+	rdt_staged_configs_clear();
 
 	while ((tok = strsep(&buf, "\n")) != NULL) {
 		resname = strim(strsep(&tok, ":"));
@@ -445,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	}
 
 out:
+	rdt_staged_configs_clear();
 	rdtgroup_kn_unlock(of->kn);
 	cpus_read_unlock();
 	return ret ?: nbytes;
@@ -555,5 +555,6 @@ void __check_limbo(struct rdt_domain *d, bool force_free);
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 void __init thread_throttle_mode_init(void);
 void __init mbm_config_rftype_init(const char *config);
+void rdt_staged_configs_clear(void);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
 	va_end(ap);
 }
 
+void rdt_staged_configs_clear(void)
+{
+	struct rdt_resource *r;
+	struct rdt_domain *dom;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	for_each_alloc_capable_rdt_resource(r) {
+		list_for_each_entry(dom, &r->domains, list)
+			memset(dom->staged_config, 0, sizeof(dom->staged_config));
+	}
+}
+
 /*
  * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
  * we can keep a bitmap of free CLOSIDs in a single integer.
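The new rdt_staged_configs_clear() above appears to exist because the schemata-write and group-init paths stage per-domain control values in dom->staged_config[] and then apply them; stale entries left behind by an earlier, partially parsed request can make a later apply push a value for a configuration that was never asked for (the "nonexistent MSR" from the pull message). The sketch below is a generic userspace model of that "clear the staging area before and after use" discipline, with made-up types — it is not the resctrl data structures themselves:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy stand-ins for a domain's staged configuration slots. */
    struct staged_cfg {
            unsigned int new_ctrl;
            bool have_new_ctrl;
    };

    #define NUM_TYPES 2

    static struct staged_cfg staged[NUM_TYPES];

    static void staged_configs_clear(void)
    {
            memset(staged, 0, sizeof(staged));
    }

    static void stage(unsigned int type, unsigned int val)
    {
            staged[type].new_ctrl = val;
            staged[type].have_new_ctrl = true;
    }

    /* Apply only the slots explicitly staged for this request. */
    static void apply(void)
    {
            for (unsigned int t = 0; t < NUM_TYPES; t++)
                    if (staged[t].have_new_ctrl)
                            printf("applying type %u -> %#x\n", t, staged[t].new_ctrl);
    }

    int main(void)
    {
            staged_configs_clear();  /* before: no leftovers from earlier requests */
            stage(0, 0xf);
            apply();
            staged_configs_clear();  /* after: nothing stale for the next request */
            return 0;
    }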
@@ -3107,7 +3120,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
 	struct resctrl_schema *s;
 	struct rdt_resource *r;
-	int ret;
+	int ret = 0;
 
+	rdt_staged_configs_clear();
+
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
@@ -3119,20 +3134,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 		} else {
 			ret = rdtgroup_init_cat(s, rdtgrp->closid);
 			if (ret < 0)
-				return ret;
+				goto out;
 		}
 
 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
 		if (ret < 0) {
 			rdt_last_cmd_puts("Failed to initialize allocations\n");
-			return ret;
+			goto out;
 		}
 
 	}
 
 	rdtgrp->mode = RDT_MODE_SHAREABLE;
 
-	return 0;
+out:
+	rdt_staged_configs_clear();
+	return ret;
 }
 
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
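The rdtgroup_init_alloc() change above is a standard conversion from multiple early returns to a single exit label so the staged configs are always cleared on every path out of the function. A minimal, generic illustration of that idiom (the function and resource names here are invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical example: every exit path must release the scratch buffer. */
    static int do_work(void)
    {
            char *scratch = malloc(64);
            int ret = 0;

            if (!scratch)
                    return -1;

            if (snprintf(scratch, 64, "step1") < 0) {
                    ret = -1;
                    goto out;   /* was: return -1; which would leak scratch */
            }

            printf("%s done\n", scratch);

    out:
            free(scratch);      /* single place for the cleanup */
            return ret;
    }

    int main(void)
    {
            return do_work() ? 1 : 0;
    }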
@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	struct ghcb *ghcb;
 	int ret;
 
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-		return -ENODEV;
-
 	if (!fw_err)
 		return -EINVAL;
 
@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	if (ret)
 		goto e_put;
 
-	if (ghcb->save.sw_exit_info_2) {
+	*fw_err = ghcb->save.sw_exit_info_2;
+	switch (*fw_err) {
+	case 0:
+		break;
+
+	case SNP_GUEST_REQ_ERR_BUSY:
+		ret = -EAGAIN;
+		break;
+
+	case SNP_GUEST_REQ_INVALID_LEN:
 		/* Number of expected pages are returned in RBX */
-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
+		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
 			input->data_npages = ghcb_get_rbx(ghcb);
-
-		*fw_err = ghcb->save.sw_exit_info_2;
-
+			ret = -ENOSPC;
+			break;
+		}
+		fallthrough;
+	default:
 		ret = -EIO;
+		break;
 	}
 
 e_put:
@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+		return;
 
 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
 		sme_me_mask = me_mask;
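The sme_enable() hunk above is the "uninitialized buffer" fix from the pull message: cmdline_find_option() only fills the output buffer when it finds the option, so comparing the buffer without checking the return value reads stack garbage. A self-contained sketch of the same bug class and the guarded pattern, using an invented lookup helper rather than the real boot-time parser:

    #include <stdio.h>
    #include <string.h>

    /*
     * Hypothetical lookup: writes to buf and returns its length only when the
     * key is present; returns -1 (and leaves buf untouched) otherwise.
     */
    static int find_option(const char *cmdline, const char *key, char *buf, size_t len)
    {
            const char *p = strstr(cmdline, key);

            if (!p)
                    return -1;
            snprintf(buf, len, "%s", p + strlen(key));
            return (int)strlen(buf);
    }

    int main(void)
    {
            char buffer[16];  /* deliberately not zero-initialized */

            /* Guarded use: bail out before touching buffer if nothing was found. */
            if (find_option("console=ttyS0", "mem_encrypt=", buffer, sizeof(buffer)) < 0)
                    return 0;

            if (!strncmp(buffer, "on", sizeof(buffer)))
                    printf("memory encryption requested\n");
            return 0;
    }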
@@ -31,6 +31,9 @@
 #define AAD_LEN		48
 #define MSG_HDR_VER	1
 
+#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
+#define SNP_REQ_RETRY_DELAY		(2*HZ)
+
 struct snp_guest_crypto {
 	struct crypto_aead *tfm;
 	u8 *iv, *authtag;
@@ -318,11 +321,94 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
 	return __enc_payload(snp_dev, req, payload, sz);
 }
 
+static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
+{
+	unsigned long err = 0xff, override_err = 0;
+	unsigned long req_start = jiffies;
+	unsigned int override_npages = 0;
+	int rc;
+
+retry_request:
+	/*
+	 * Call firmware to process the request. In this function the encrypted
+	 * message enters shared memory with the host. So after this call the
+	 * sequence number must be incremented or the VMPCK must be deleted to
+	 * prevent reuse of the IV.
+	 */
+	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+	switch (rc) {
+	case -ENOSPC:
+		/*
+		 * If the extended guest request fails due to having too
+		 * small of a certificate data buffer, retry the same
+		 * guest request without the extended data request in
+		 * order to increment the sequence number and thus avoid
+		 * IV reuse.
+		 */
+		override_npages = snp_dev->input.data_npages;
+		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+		/*
+		 * Override the error to inform callers the given extended
+		 * request buffer size was too small and give the caller the
+		 * required buffer size.
+		 */
+		override_err = SNP_GUEST_REQ_INVALID_LEN;
+
+		/*
+		 * If this call to the firmware succeeds, the sequence number can
+		 * be incremented allowing for continued use of the VMPCK. If
+		 * there is an error reflected in the return value, this value
+		 * is checked further down and the result will be the deletion
+		 * of the VMPCK and the error code being propagated back to the
+		 * user as an ioctl() return code.
+		 */
+		goto retry_request;
+
+	/*
+	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
+	 * throttled. Retry in the driver to avoid returning and reusing the
+	 * message sequence number on a different message.
+	 */
+	case -EAGAIN:
+		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
+			rc = -ETIMEDOUT;
+			break;
+		}
+		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
+		goto retry_request;
+	}
+
+	/*
+	 * Increment the message sequence number. There is no harm in doing
+	 * this now because decryption uses the value stored in the response
+	 * structure and any failure will wipe the VMPCK, preventing further
+	 * use anyway.
+	 */
+	snp_inc_msg_seqno(snp_dev);
+
+	if (fw_err)
+		*fw_err = override_err ?: err;
+
+	if (override_npages)
+		snp_dev->input.data_npages = override_npages;
+
+	/*
+	 * If an extended guest request was issued and the supplied certificate
+	 * buffer was not large enough, a standard guest request was issued to
+	 * prevent IV reuse. If the standard request was successful, return -EIO
+	 * back to the caller as would have originally been returned.
+	 */
+	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+		return -EIO;
+
+	return rc;
+}
+
 static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
 				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
 				u32 resp_sz, __u64 *fw_err)
 {
-	unsigned long err;
 	u64 seqno;
 	int rc;
 
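One subtle point in the new __handle_guest_request() above: when an extended request fails with -ENOSPC, the helper retries as a plain guest request purely to consume the sequence number, then reports SNP_GUEST_REQ_INVALID_LEN (and -EIO) to the caller together with the required page count, even though the retried request itself succeeded. The toy harness below models just that control flow with a stubbed firmware call so the outcome can be checked in isolation; all names here are invented for illustration and this is not the driver code:

    #include <errno.h>
    #include <stdio.h>

    #define REQ_EXTENDED    1
    #define REQ_PLAIN       0
    #define ERR_INVALID_LEN 0x100000000ULL  /* stands in for SNP_GUEST_REQ_INVALID_LEN */

    /* Stub "firmware": extended requests need 4 pages, plain ones always succeed. */
    static int stub_issue(int exit_code, unsigned int *npages, unsigned long long *err)
    {
            if (exit_code == REQ_EXTENDED && *npages < 4) {
                    *npages = 4;
                    *err = ERR_INVALID_LEN;
                    return -ENOSPC;
            }
            *err = 0;
            return 0;
    }

    static int handle(int exit_code, unsigned int *npages, unsigned long long *fw_err)
    {
            unsigned long long err = 0, override_err = 0;
            unsigned int override_npages = 0;
            int rc;

    retry:
            rc = stub_issue(exit_code, npages, &err);
            if (rc == -ENOSPC) {
                    /* Remember required size, downgrade, retry to keep seqno moving. */
                    override_npages = *npages;
                    override_err = ERR_INVALID_LEN;
                    exit_code = REQ_PLAIN;
                    goto retry;
            }

            *fw_err = override_err ? override_err : err;
            if (override_npages)
                    *npages = override_npages;

            /* The retried plain request succeeded, but the caller still sees -EIO. */
            if (!rc && override_err == ERR_INVALID_LEN)
                    return -EIO;

            return rc;
    }

    int main(void)
    {
            unsigned int npages = 1;
            unsigned long long fw_err = 0;
            int rc = handle(REQ_EXTENDED, &npages, &fw_err);

            printf("rc=%d fw_err=%#llx required pages=%u\n", rc, fw_err, npages);
            return 0;
    }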
@@ -338,85 +424,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	if (rc)
 		return rc;
 
-	/*
-	 * Call firmware to process the request. In this function the encrypted
-	 * message enters shared memory with the host. So after this call the
-	 * sequence number must be incremented or the VMPCK must be deleted to
-	 * prevent reuse of the IV.
-	 */
-	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
-
-	/*
-	 * If the extended guest request fails due to having too small of a
-	 * certificate data buffer, retry the same guest request without the
-	 * extended data request in order to increment the sequence number
-	 * and thus avoid IV reuse.
-	 */
-	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-	    err == SNP_GUEST_REQ_INVALID_LEN) {
-		const unsigned int certs_npages = snp_dev->input.data_npages;
-
-		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
-
-		/*
-		 * If this call to the firmware succeeds, the sequence number can
-		 * be incremented allowing for continued use of the VMPCK. If
-		 * there is an error reflected in the return value, this value
-		 * is checked further down and the result will be the deletion
-		 * of the VMPCK and the error code being propagated back to the
-		 * user as an ioctl() return code.
-		 */
-		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
-
-		/*
-		 * Override the error to inform callers the given extended
-		 * request buffer size was too small and give the caller the
-		 * required buffer size.
-		 */
-		err = SNP_GUEST_REQ_INVALID_LEN;
-		snp_dev->input.data_npages = certs_npages;
-	}
-
-	/*
-	 * Increment the message sequence number. There is no harm in doing
-	 * this now because decryption uses the value stored in the response
-	 * structure and any failure will wipe the VMPCK, preventing further
-	 * use anyway.
-	 */
-	snp_inc_msg_seqno(snp_dev);
-
-	if (fw_err)
-		*fw_err = err;
-
-	/*
-	 * If an extended guest request was issued and the supplied certificate
-	 * buffer was not large enough, a standard guest request was issued to
-	 * prevent IV reuse. If the standard request was successful, return -EIO
-	 * back to the caller as would have originally been returned.
-	 */
-	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
-		return -EIO;
-
+	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
-			  rc, *fw_err);
-		goto disable_vmpck;
+		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+			return rc;
+
+		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected unexpected decode failure from ASP. rc: %d\n",
-			  rc);
-		goto disable_vmpck;
+		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	return 0;
-
-disable_vmpck:
-	snp_disable_vmpck(snp_dev);
-	return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 	void __iomem *mapping;
 	int ret;
 
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return -ENODEV;
+
 	if (!dev->platform_data)
 		return -ENODEV;
 