/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"
/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

bool sev_enabled __section(.data);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at
	 * most) one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}
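/*
 * Convenience wrappers around __sme_early_enc_dec() for the two
 * directions; callers only pass the physical range to convert.
 */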
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}
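/*
 * Map or unmap a range with early PMD entries that have the encryption
 * mask cleared, so data the bootloader wrote unencrypted can be read
 * before the kernel switches to encrypted mappings.
 */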
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	__native_flush_tlb();
}
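/*
 * The boot_params structure and the kernel command line are written by the
 * bootloader as unencrypted data, so they are mapped without the encryption
 * bit while they are consumed and unmapped again afterwards.
 */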
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
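/*
 * Fold the encryption mask into the early page-table flags, the supported
 * PTE mask and the protection map so that later kernel mappings are created
 * encrypted by default.
 */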
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}
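/*
 * DMA coherent allocation for SEV guests: allocate pages directly when they
 * fit the device's DMA mask, otherwise fall back to the SWIOTLB pool, and
 * mark non-SWIOTLB memory decrypted so the device can access it.
 */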
static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		       gfp_t gfp, unsigned long attrs)
{
	unsigned long dma_mask;
	unsigned int order;
	struct page *page;
	void *vaddr = NULL;

	dma_mask = dma_alloc_coherent_mask(dev, gfp);
	order = get_order(size);

	/*
	 * Memory will be memset to zero after marking decrypted, so don't
	 * bother clearing it before.
	 */
	gfp &= ~__GFP_ZERO;

	page = alloc_pages_node(dev_to_node(dev), gfp, order);
	if (page) {
		dma_addr_t addr;

		/*
		 * Since we will be clearing the encryption bit, check the
		 * mask with it already cleared.
		 */
		addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
		if ((addr + size) > dma_mask) {
			__free_pages(page, get_order(size));
		} else {
			vaddr = page_address(page);
			*dma_handle = addr;
		}
	}

	if (!vaddr)
		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	if (!vaddr)
		return NULL;

	/* Clear the SME encryption bit for DMA use if not swiotlb area */
	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
		set_memory_decrypted((unsigned long)vaddr, 1 << order);
		memset(vaddr, 0, PAGE_SIZE << order);
		*dma_handle = __sme_clr(*dma_handle);
	}

	return vaddr;
}
static void sev_free(struct device *dev, size_t size, void *vaddr,
		     dma_addr_t dma_handle, unsigned long attrs)
{
	/* Set the SME encryption bit for re-use if not swiotlb area */
	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
		set_memory_encrypted((unsigned long)vaddr,
				     1 << get_order(size));

	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
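/*
 * Flip the encryption bit in a single kernel page-table entry (4K, 2M or
 * 1G) and transcode the page contents in place so they remain readable
 * through the new mapping.
 */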
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If prot is same then do nothing. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << page_level_shift(level);
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}
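/*
 * Walk the kernel mapping for [vaddr, vaddr + size) and change the C-bit of
 * each entry; large pages are converted whole when the range covers them
 * and split otherwise.
 */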
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned and
		 * the number of pages to set/clear encryption bit is smaller
		 * than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, create the next
		 * level page table mapping (4K or 2M). If it is part of a 2M
		 * page then we request a split of the large page into 4K
		 * chunks. A 1GB large page is split into 2M pages, resp.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		kernel_physical_mapping_init(__pa(vaddr & pmask),
					     __pa((vaddr_end & pmask) + psize),
					     split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}
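/*
 * Illustrative sketch (not part of this file): a SEV guest that wants to
 * share a page of data with the hypervisor early in boot could clear its
 * C-bit with something like
 *
 *	static u8 shared_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
 *
 *	if (sev_active())
 *		early_set_memory_decrypted((unsigned long)shared_buf,
 *					   sizeof(shared_buf));
 *
 * where shared_buf is a hypothetical placeholder, and restore the mapping
 * with early_set_memory_encrypted() once the sharing is finished.
 */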
/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sme_active(void)
{
	return sme_me_mask && !sev_enabled;
}
EXPORT_SYMBOL(sme_active);

bool sev_active(void)
{
	return sme_me_mask && sev_enabled;
}
EXPORT_SYMBOL(sev_active);
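/*
 * Illustrative sketch (not part of this file): the trampoline example from
 * the comment above roughly follows this pattern, with base and size as
 * hypothetical placeholders for the trampoline area:
 *
 *	if (sme_active())
 *		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
 *
 * Under SEV the area is simply left encrypted.
 */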
static const struct dma_map_ops sev_dma_ops = {
	.alloc			= sev_alloc,
	.free			= sev_free,
	.map_page		= swiotlb_map_page,
	.unmap_page		= swiotlb_unmap_page,
	.map_sg			= swiotlb_map_sg_attrs,
	.unmap_sg		= swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
	.sync_single_for_device	= swiotlb_sync_single_for_device,
	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
	.mapping_error		= swiotlb_dma_mapping_error,
};
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, DMA operations cannot use encryption. New DMA ops
	 * are required in order to mark the DMA areas as decrypted or
	 * to use bounce buffers.
	 */
	if (sev_active())
		dma_ops = &sev_dma_ops;

	/* With SEV, we need to unroll the rep string I/O instructions. */
	if (sev_active())
		static_branch_enable(&sev_enable_key);

	pr_info("AMD %s active\n",
		sev_active() ? "Secure Encrypted Virtualization (SEV)"
			     : "Secure Memory Encryption (SME)");
}
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
{
	WARN(PAGE_ALIGN(size) != size,
	     "size is not page-aligned (%#lx)\n", size);

	/* Make the SWIOTLB buffer area decrypted */
	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
}