1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
4 *
5 * Copyright (C) 2021 Advanced Micro Devices, Inc.
6 *
7 * Author: Brijesh Singh <brijesh.singh@amd.com>
8 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/mutex.h>
15 #include <linux/platform_device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/set_memory.h>
19 #include <crypto/aead.h>
20 #include <linux/scatterlist.h>
21 #include <linux/psp-sev.h>
22 #include <uapi/linux/sev-guest.h>
23 #include <uapi/linux/psp-sev.h>
28 #include "sev-guest.h"
30 #define DEVICE_NAME "sev-guest"
/*
 * AES-GCM crypto context used to encrypt/decrypt SNP guest request and
 * response messages.
 *
 * NOTE(review): only the 'tfm' member is visible in this excerpt; the
 * iv/authtag buffers and their lengths (crypto->iv, crypto->iv_len,
 * crypto->authtag, crypto->a_len) referenced elsewhere in this file appear
 * elided here — confirm against the full struct definition.
 */
34 struct snp_guest_crypto {
35 struct crypto_aead *tfm;
/*
 * Per-instance state for the "sev-guest" misc device.
 *
 * request/response point at shared (host-visible, decrypted) pages passed to
 * the hypervisor via 'input'; vmpck and os_area_msg_seqno point into the SNP
 * secrets page mapping ('layout').
 */
40 struct snp_guest_dev {
42 struct miscdevice misc;
45 struct snp_guest_crypto *crypto;
46 struct snp_guest_msg *request, *response;
47 struct snp_secrets_page_layout *layout;
48 struct snp_req_data input;
/* Pointer into the secrets page os_area: message seqno for the chosen VMPCK. */
49 u32 *os_area_msg_seqno;
/* Which VMPCK (0-3) to use for PSP communication; read-only module parameter. */
54 module_param(vmpck_id, uint, 0444);
55 MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
57 /* Mutex to serialize the shared buffer access and command handling. */
58 static DEFINE_MUTEX(snp_cmd_mutex);
/*
 * Return true if the selected VMPCK is all zeroes, i.e. unusable/disabled.
 * NOTE(review): upstream also treats a NULL snp_dev->vmpck (set by
 * snp_disable_vmpck()) as empty; that guard appears elided in this excerpt.
 */
60 static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
62 char zero_key[VMPCK_KEY_LEN] = {0};
65 return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
71 * If an error is received from the host or AMD Secure Processor (ASP) there
72 * are two options. Either retry the exact same encrypted request or discontinue
75 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
76 * encrypt the requests. The IV for this scheme is the sequence number. GCM
77 * cannot tolerate IV reuse.
79 * The ASP FW v1.51 only increments the sequence numbers on a successful
80 * guest<->ASP back and forth and only accepts messages at its exact sequence
83 * So if the sequence number were to be reused the encryption scheme is
84 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
85 * will reject the request.
/*
 * Permanently disable the key when the sequence number can no longer be
 * trusted (see the IV-reuse discussion above): zero the VMPCK in the secrets
 * page — memzero_explicit() so the clear cannot be optimized away — and drop
 * our pointer so later commands see an empty key and are refused.
 */
87 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
89 dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
91 memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
92 snp_dev->vmpck = NULL;
/*
 * Read the current message sequence counter from the secrets page.
 * Caller must hold snp_cmd_mutex (asserted below).
 * NOTE(review): the return statement is elided in this excerpt; upstream
 * returns count + 1, i.e. the seqno to use for the next request.
 */
95 static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
99 lockdep_assert_held(&snp_cmd_mutex);
101 /* Read the current message sequence counter from secrets pages */
102 count = *snp_dev->os_area_msg_seqno;
107 /* Return a non-zero on success */
108 static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
110 u64 count = __snp_get_msg_seqno(snp_dev);
113 * The message sequence counter for the SNP guest request is a 64-bit
114 * value but the version 2 of GHCB specification defines a 32-bit storage
115 * for it. If the counter exceeds the 32-bit value then return zero.
116 * The caller should check the return value, but if the caller happens to
117 * not check the value and use it, then the firmware treats zero as an
118 * invalid number and will fail the message request.
/* NOTE(review): the 'return 0' for the overflow branch and the final
 * 'return count' appear elided in this excerpt. */
120 if (count >= UINT_MAX) {
121 dev_err(snp_dev->dev, "request message sequence counter overflow\n");
/*
 * Advance the sequence counter in the secrets page after a successful
 * round-trip. Caller must hold snp_cmd_mutex.
 */
128 static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
131 * The counter is also incremented by the PSP, so increment it by 2
132 * and save in secrets page.
134 *snp_dev->os_area_msg_seqno += 2;
/*
 * Recover the snp_guest_dev from an open file: misc_register() stores the
 * miscdevice in file->private_data, and 'misc' is embedded in snp_guest_dev.
 */
137 static inline struct snp_guest_dev *to_snp_dev(struct file *file)
139 struct miscdevice *dev = file->private_data;
141 return container_of(dev, struct snp_guest_dev, misc);
/*
 * Allocate and initialize the AES-GCM context keyed with the VMPCK.
 *
 * Allocates the context, a "gcm(aes)" AEAD transform, an IV buffer sized to
 * the transform's ivsize, and an authtag buffer sized to the (possibly
 * capped) authsize. The tag size is capped at MAX_AUTHTAG_LEN so it fits the
 * message header's authtag field. Returns NULL on any failure.
 *
 * NOTE(review): the error-unwind labels and several intermediate NULL-check/
 * goto lines appear elided in this excerpt; the visible kfree/crypto_free_aead
 * lines (179, 183) are fragments of that unwind path.
 */
144 static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
146 struct snp_guest_crypto *crypto;
148 crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
152 crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
153 if (IS_ERR(crypto->tfm))
156 if (crypto_aead_setkey(crypto->tfm, key, keylen))
159 crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
160 crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
164 if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
165 if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
166 dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
171 crypto->a_len = crypto_aead_authsize(crypto->tfm);
172 crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
173 if (!crypto->authtag)
179 kfree(crypto->authtag);
183 crypto_free_aead(crypto->tfm);
/* Tear down the AEAD transform and buffers allocated by init_crypto(). */
190 static void deinit_crypto(struct snp_guest_crypto *crypto)
192 crypto_free_aead(crypto->tfm);
194 kfree(crypto->authtag);
/* NOTE(review): the frees of crypto->iv and of 'crypto' itself appear
 * elided in this excerpt. */
/*
 * Perform one AES-GCM operation on an SNP guest message, synchronously.
 *
 * Both scatterlists cover the same three regions — the AAD (the tail of the
 * message header starting at hdr->algo), the payload, and the auth tag — so
 * encryption writes ciphertext+tag in place of the payload area and
 * decryption verifies the tag while writing plaintext to dst_buf.
 *
 * @len: length passed to the AEAD layer; for encrypt this is the plaintext
 *       length, for decrypt it must include the tag (callers pass
 *       msg_sz + a_len). The IV must already be set up in crypto->iv.
 *
 * Returns 0 on success or a negative crypto-layer error (e.g. -EBADMSG on
 * tag mismatch). Completion is waited for via DECLARE_CRYPTO_WAIT +
 * crypto_wait_req(), so this may sleep.
 */
198 static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
199 u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
201 struct snp_guest_msg_hdr *hdr = &msg->hdr;
202 struct scatterlist src[3], dst[3];
203 DECLARE_CRYPTO_WAIT(wait);
204 struct aead_request *req;
207 req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
212 * AEAD memory operations:
213 * +------ AAD -------+------- DATA -----+---- AUTHTAG----+
214 * | msg header | plaintext | hdr->authtag |
215 * | bytes 30h - 5Fh | or | |
217 +------------------+------------------+----------------+
219 sg_init_table(src, 3);
220 sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
221 sg_set_buf(&src[1], src_buf, hdr->msg_sz);
222 sg_set_buf(&src[2], hdr->authtag, crypto->a_len);
224 sg_init_table(dst, 3);
225 sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
226 sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
227 sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);
229 aead_request_set_ad(req, AAD_LEN);
230 aead_request_set_tfm(req, crypto->tfm);
231 aead_request_set_callback(req, 0, crypto_req_done, &wait);
233 aead_request_set_crypt(req, src, dst, len, crypto->iv);
234 ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);
236 aead_request_free(req);
/*
 * Encrypt 'plaintext' into msg->payload. The GCM IV is the 64-bit message
 * sequence number from the header, zero-padded to the transform's IV size —
 * which is why a sequence number must never be reused (see comment above
 * snp_disable_vmpck()).
 */
240 static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
241 void *plaintext, size_t len)
243 struct snp_guest_crypto *crypto = snp_dev->crypto;
244 struct snp_guest_msg_hdr *hdr = &msg->hdr;
246 memset(crypto->iv, 0, crypto->iv_len);
247 memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
249 return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
/*
 * Decrypt msg->payload into 'plaintext', deriving the IV from the response
 * header's sequence number. 'len' must include the auth tag (callers pass
 * msg_sz + a_len); tag mismatch is reported by the AEAD layer.
 */
252 static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
253 void *plaintext, size_t len)
255 struct snp_guest_crypto *crypto = snp_dev->crypto;
256 struct snp_guest_msg_hdr *hdr = &msg->hdr;
258 /* Build IV with response buffer sequence number */
259 memset(crypto->iv, 0, crypto->iv_len);
260 memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
262 return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
/*
 * Validate the response header against the request that was sent, then
 * decrypt the response payload into 'payload' (a buffer of 'sz' bytes).
 *
 * Checks: seqno must be exactly request seqno + 1; msg_type must be the
 * request type + 1 (SNP response types are request type + 1); versions must
 * match; and msg_sz + a_len must fit in the caller's buffer.
 *
 * NOTE(review): the error-return lines (-EBADMSG for the header checks) are
 * elided in this excerpt.
 */
265 static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
267 struct snp_guest_crypto *crypto = snp_dev->crypto;
268 struct snp_guest_msg *resp = snp_dev->response;
269 struct snp_guest_msg *req = snp_dev->request;
270 struct snp_guest_msg_hdr *req_hdr = &req->hdr;
271 struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
273 dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
274 resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
276 /* Verify that the sequence counter is incremented by 1 */
277 if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
280 /* Verify response message type and version number. */
281 if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
282 resp_hdr->msg_version != req_hdr->msg_version)
286 * If the message size is greater than our buffer length then return
289 if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
292 /* Decrypt the payload */
293 return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
/*
 * Build the request message header in the shared request page and encrypt
 * the caller's payload into it.
 *
 * NOTE(review): the 'hdr->msg_sz = sz;' assignment and the zero-seqno check
 * hinted at by the comment at line 313 appear elided in this excerpt —
 * enc_dec_message() relies on hdr->msg_sz being set.
 */
296 static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
297 void *payload, size_t sz)
299 struct snp_guest_msg *req = snp_dev->request;
300 struct snp_guest_msg_hdr *hdr = &req->hdr;
302 memset(req, 0, sizeof(*req));
304 hdr->algo = SNP_AEAD_AES_256_GCM;
305 hdr->hdr_version = MSG_HDR_VER;
306 hdr->hdr_sz = sizeof(*hdr);
307 hdr->msg_type = type;
308 hdr->msg_version = version;
309 hdr->msg_seqno = seqno;
310 hdr->msg_vmpck = vmpck_id;
313 /* Verify the sequence number is non-zero */
317 dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
318 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
320 return __enc_payload(snp_dev, req, payload, sz);
/*
 * Core request path: encrypt the caller's payload, issue the guest request
 * to the ASP firmware, verify/decrypt the response, and advance the message
 * sequence number.
 *
 * IV-reuse discipline (see the block comment above snp_disable_vmpck()):
 * once the encrypted message has been exposed to the host via
 * snp_issue_guest_request(), either the round-trip succeeds and the seqno is
 * incremented, or the VMPCK is destroyed so the same IV can never be used
 * again. The SNP_GUEST_REQ_INVALID_LEN special case retries an extended
 * request as a plain request purely to burn the sequence number safely.
 *
 * NOTE(review): several lines are elided in this excerpt — the early returns
 * for seqno == 0 / enc_payload failure, the 'goto disable_vmpck' branches,
 * the *fw_err assignment, and the final 'return rc/0'. The visible
 * dev_alert()s at lines 197-201 are the failure branches, and line 204 is
 * the disable_vmpck label body.
 */
323 static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
324 u8 type, void *req_buf, size_t req_sz, void *resp_buf,
325 u32 resp_sz, __u64 *fw_err)
331 /* Get message sequence and verify that its a non-zero */
332 seqno = snp_get_msg_seqno(snp_dev);
336 memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
338 /* Encrypt the userspace provided payload */
339 rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
344 * Call firmware to process the request. In this function the encrypted
345 * message enters shared memory with the host. So after this call the
346 * sequence number must be incremented or the VMPCK must be deleted to
347 * prevent reuse of the IV.
349 rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
352 * If the extended guest request fails due to having too small of a
353 * certificate data buffer, retry the same guest request without the
354 * extended data request in order to increment the sequence number
355 * and thus avoid IV reuse.
357 if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
358 err == SNP_GUEST_REQ_INVALID_LEN) {
359 const unsigned int certs_npages = snp_dev->input.data_npages;
361 exit_code = SVM_VMGEXIT_GUEST_REQUEST;
364 * If this call to the firmware succeeds, the sequence number can
365 * be incremented allowing for continued use of the VMPCK. If
366 * there is an error reflected in the return value, this value
367 * is checked further down and the result will be the deletion
368 * of the VMPCK and the error code being propagated back to the
369 * user as an ioctl() return code.
371 rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
374 * Override the error to inform callers the given extended
375 * request buffer size was too small and give the caller the
376 * required buffer size.
378 err = SNP_GUEST_REQ_INVALID_LEN;
379 snp_dev->input.data_npages = certs_npages;
386 dev_alert(snp_dev->dev,
387 "Detected error from ASP request. rc: %d, fw_err: %llu\n",
392 rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
394 dev_alert(snp_dev->dev,
395 "Detected unexpected decode failure from ASP. rc: %d\n",
400 /* Increment to new message sequence after payload decryption was successful. */
401 snp_inc_msg_seqno(snp_dev);
406 snp_disable_vmpck(snp_dev);
/*
 * SNP_GET_REPORT ioctl handler: copy the report request from userspace, run
 * a SNP_MSG_REPORT_REQ round-trip, and copy the attestation report back.
 *
 * The kernel-side bounce buffer is oversized by crypto->a_len because
 * dec_payload() decrypts msg_sz + authtag bytes into it.
 *
 * NOTE(review): the error returns (-EINVAL/-EFAULT/-ENOMEM), kfree of 'resp'
 * and the 'e_free'/return tail are elided in this excerpt.
 */
410 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
412 struct snp_guest_crypto *crypto = snp_dev->crypto;
413 struct snp_report_resp *resp;
414 struct snp_report_req req;
417 lockdep_assert_held(&snp_cmd_mutex);
419 if (!arg->req_data || !arg->resp_data)
422 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
426 * The intermediate response buffer is used while decrypting the
427 * response payload. Make sure that it has enough space to cover the
430 resp_len = sizeof(resp->data) + crypto->a_len;
431 resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
435 rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
436 SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
437 resp_len, &arg->fw_err);
441 if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
/*
 * SNP_GET_DERIVED_KEY ioctl handler: run a SNP_MSG_KEY_REQ round-trip and
 * return the derived key material to userspace.
 *
 * Uses a fixed on-stack buffer ('buf', declared on an elided line; the
 * comment at line 455 documents its sizing: 64-byte response + 16-byte max
 * GCM tag) and defensively verifies it is large enough for
 * sizeof(resp.data) + a_len. Both the stack buffer and the response struct
 * hold key material, so they are scrubbed with memzero_explicit() before
 * return.
 */
449 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
451 struct snp_guest_crypto *crypto = snp_dev->crypto;
452 struct snp_derived_key_resp resp = {0};
453 struct snp_derived_key_req req;
455 /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
458 lockdep_assert_held(&snp_cmd_mutex);
460 if (!arg->req_data || !arg->resp_data)
464 * The intermediate response buffer is used while decrypting the
465 * response payload. Make sure that it has enough space to cover the
468 resp_len = sizeof(resp.data) + crypto->a_len;
469 if (sizeof(buf) < resp_len)
472 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
475 rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg->msg_version,
476 SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len,
481 memcpy(resp.data, buf, sizeof(resp.data));
482 if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
485 /* The response buffer contains the sensitive data, explicitly clear it. */
486 memzero_explicit(buf, sizeof(buf));
487 memzero_explicit(&resp, sizeof(resp));
/*
 * SNP_GET_EXT_REPORT ioctl handler: like get_report(), but additionally asks
 * the host for the certificate blob needed to verify the report
 * (SVM_VMGEXIT_EXT_GUEST_REQUEST).
 *
 * Userspace supplies a page-aligned certs buffer up to SEV_FW_BLOB_MAX_SIZE;
 * the host fills snp_dev->certs_data (pre-zeroed so "no certs" reads back as
 * zeros). If the buffer was too small, the required length (in pages,
 * reflected back via input.data_npages) is copied into req.certs_len for
 * userspace to retry.
 *
 * NOTE(review): the error returns, the 'goto e_free' paths, the kfree of
 * 'resp' and the certs_len condition guarding the copy_to_user at line 553
 * are elided in this excerpt.
 */
491 static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
493 struct snp_guest_crypto *crypto = snp_dev->crypto;
494 struct snp_ext_report_req req;
495 struct snp_report_resp *resp;
496 int ret, npages = 0, resp_len;
498 lockdep_assert_held(&snp_cmd_mutex);
500 if (!arg->req_data || !arg->resp_data)
503 if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
506 /* userspace does not want certificate data */
507 if (!req.certs_len || !req.certs_address)
510 if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
511 !IS_ALIGNED(req.certs_len, PAGE_SIZE))
514 if (!access_ok((const void __user *)req.certs_address, req.certs_len))
518 * Initialize the intermediate buffer with all zeros. This buffer
519 * is used in the guest request message to get the certs blob from
520 * the host. If host does not supply any certs in it, then copy
521 * zeros to indicate that certificate data was not provided.
523 memset(snp_dev->certs_data, 0, req.certs_len);
524 npages = req.certs_len >> PAGE_SHIFT;
527 * The intermediate response buffer is used while decrypting the
528 * response payload. Make sure that it has enough space to cover the
531 resp_len = sizeof(resp->data) + crypto->a_len;
532 resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
536 snp_dev->input.data_npages = npages;
537 ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg->msg_version,
538 SNP_MSG_REPORT_REQ, &req.data,
539 sizeof(req.data), resp->data, resp_len, &arg->fw_err);
541 /* If certs length is invalid then copy the returned length */
542 if (arg->fw_err == SNP_GUEST_REQ_INVALID_LEN) {
543 req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
545 if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
553 copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
559 if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
/*
 * Top-level ioctl dispatcher for /dev/sev-guest.
 *
 * Copies the common snp_guest_request_ioctl header from userspace, rejects
 * msg_version == 0, then dispatches under snp_cmd_mutex (which serializes
 * use of the single shared request/response pages and the seqno). Commands
 * are refused outright once the VMPCK has been disabled. On exit, fw_err is
 * copied back so userspace can see firmware error codes.
 *
 * NOTE(review): the switch statement opening, SNP_GET_REPORT case label,
 * default case, and final returns are elided in this excerpt.
 */
567 static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
569 struct snp_guest_dev *snp_dev = to_snp_dev(file);
570 void __user *argp = (void __user *)arg;
571 struct snp_guest_request_ioctl input;
574 if (copy_from_user(&input, argp, sizeof(input)))
579 /* Message version must be non-zero */
580 if (!input.msg_version)
583 mutex_lock(&snp_cmd_mutex);
585 /* Check if the VMPCK is not empty */
586 if (is_vmpck_empty(snp_dev)) {
587 dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
588 mutex_unlock(&snp_cmd_mutex);
594 ret = get_report(snp_dev, &input);
596 case SNP_GET_DERIVED_KEY:
597 ret = get_derived_key(snp_dev, &input);
599 case SNP_GET_EXT_REPORT:
600 ret = get_ext_report(snp_dev, &input);
606 mutex_unlock(&snp_cmd_mutex);
608 if (input.fw_err && copy_to_user(argp, &input, sizeof(input)))
/*
 * Return pages previously shared with the host to encrypted (private) state
 * and free them. If re-encryption fails the pages are deliberately leaked
 * rather than freed while still host-visible (see the WARN_ONCE message).
 * NOTE(review): the NULL-check on 'buf' and the early 'return' after the
 * WARN appear elided in this excerpt.
 */
614 static void free_shared_pages(void *buf, size_t sz)
616 unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
622 ret = set_memory_encrypted((unsigned long)buf, npages);
624 WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
628 __free_pages(virt_to_page(buf), get_order(sz));
/*
 * Allocate page(s) and mark them decrypted (shared with the host) so the
 * hypervisor/ASP can read the request and write the response. Returns the
 * kernel virtual address, or NULL on allocation/conversion failure (the
 * pages are freed again if set_memory_decrypted() fails).
 */
631 static void *alloc_shared_pages(struct device *dev, size_t sz)
633 unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
637 page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
641 ret = set_memory_decrypted((unsigned long)page_address(page), npages);
643 dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
644 __free_pages(page, get_order(sz));
648 return page_address(page);
/* File operations for the /dev/sev-guest misc device (ioctl-only). */
651 static const struct file_operations snp_guest_fops = {
652 .owner = THIS_MODULE,
653 .unlocked_ioctl = snp_guest_ioctl,
/*
 * Map a vmpck_id (0-3) to its key and per-key message sequence counter in
 * the secrets page. Returns the key pointer and sets *seqno, or NULL for an
 * out-of-range id.
 * NOTE(review): the switch/case/default scaffolding and final 'return key'
 * are elided in this excerpt; only the per-id assignments are visible.
 */
656 static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
662 *seqno = &layout->os_area.msg_seqno_0;
663 key = layout->vmpck0;
666 *seqno = &layout->os_area.msg_seqno_1;
667 key = layout->vmpck1;
670 *seqno = &layout->os_area.msg_seqno_2;
671 key = layout->vmpck2;
674 *seqno = &layout->os_area.msg_seqno_3;
675 key = layout->vmpck3;
/*
 * Platform-device probe: map the SNP secrets page (encrypted mapping),
 * select and validate the VMPCK, allocate the host-shared request/response/
 * certs pages, set up the AES-GCM context, and register the misc device.
 *
 * NOTE(review): the error returns, the 'e_unmap' label and several
 * NULL-check/goto lines are elided in this excerpt; the free_shared_pages()
 * calls at lines 357-359 are fragments of the unwind path. Also
 * NOTE(review): on the visible paths, a misc_register() failure unwinds via
 * e_free_cert_data without an apparent deinit_crypto() — confirm against the
 * full file whether the crypto context is released on that path.
 */
684 static int __init sev_guest_probe(struct platform_device *pdev)
686 struct snp_secrets_page_layout *layout;
687 struct sev_guest_platform_data *data;
688 struct device *dev = &pdev->dev;
689 struct snp_guest_dev *snp_dev;
690 struct miscdevice *misc;
691 void __iomem *mapping;
694 if (!dev->platform_data)
697 data = (struct sev_guest_platform_data *)dev->platform_data;
698 mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
702 layout = (__force void *)mapping;
705 snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
710 snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
711 if (!snp_dev->vmpck) {
712 dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
716 /* Verify that VMPCK is not zero. */
717 if (is_vmpck_empty(snp_dev)) {
718 dev_err(dev, "vmpck id %d is null\n", vmpck_id);
722 platform_set_drvdata(pdev, snp_dev);
724 snp_dev->layout = layout;
726 /* Allocate the shared page used for the request and response message. */
727 snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
728 if (!snp_dev->request)
731 snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
732 if (!snp_dev->response)
735 snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
736 if (!snp_dev->certs_data)
737 goto e_free_response;
740 snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
741 if (!snp_dev->crypto)
742 goto e_free_cert_data;
744 misc = &snp_dev->misc;
745 misc->minor = MISC_DYNAMIC_MINOR;
746 misc->name = DEVICE_NAME;
747 misc->fops = &snp_guest_fops;
749 /* initial the input address for guest request */
750 snp_dev->input.req_gpa = __pa(snp_dev->request);
751 snp_dev->input.resp_gpa = __pa(snp_dev->response);
752 snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
754 ret = misc_register(misc);
756 goto e_free_cert_data;
758 dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
762 free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
764 free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
766 free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
/*
 * Platform-device remove: release the shared pages (restoring them to
 * encrypted state), tear down the crypto context, and deregister the misc
 * device. snp_dev itself is devm-allocated and freed by the driver core.
 */
772 static int __exit sev_guest_remove(struct platform_device *pdev)
774 struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
776 free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
777 free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
778 free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
779 deinit_crypto(snp_dev->crypto);
780 misc_deregister(&snp_dev->misc);
786 * This driver is meant to be a common SEV guest interface driver and to
787 * support any SEV guest API. As such, even though it has been introduced
788 * with the SEV-SNP support, it is named "sev-guest".
/*
 * Platform driver registration. module_platform_driver_probe() binds the
 * __init probe exactly once at module load; remove is wrapped in __exit_p so
 * it is compiled out for built-in configurations.
 */
790 static struct platform_driver sev_guest_driver = {
791 .remove = __exit_p(sev_guest_remove),
797 module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
/* Module metadata. */
799 MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
800 MODULE_LICENSE("GPL");
801 MODULE_VERSION("1.0.0");
802 MODULE_DESCRIPTION("AMD SEV Guest Driver");