Merge tag 'snp_cache_coherency' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

 - Add a mitigation for a cache coherency vulnerability when running an
   SNP guest which makes sure all cache lines belonging to a 4K page are
   evicted after the latter has been converted to a guest-private page

[ SNP: Secure Nested Paging - not to be confused with Single Nucleotide
  Polymorphism, which is the more common use of that TLA. I am on a
  mission to write out the more obscure TLAs in order to keep track of
  them.

  Because while math tells us that there are only about 17k different
  combinations of three-letter acronyms using English letters (26^3), I
  am convinced that somehow Intel, AMD and ARM have together figured out
  new mathematics, and have at least a million different TLAs that they
  use.   - Linus ]

* tag 'snp_cache_coherency' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Evict cache lines during SNP memory validation
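
[ Background for the diff below, in hedged terms: while a page is still
  shared, cache lines for it can exist that are stale once the page has
  been validated as guest-private. Per the comment in sev_evict_cache()
  further down, reading the first and last cache line of such a 4K page
  with the guest key is sufficient to flush all of that page's cache
  lines, without the overhead of a full CLFLUSH sequence. ]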
commit 20e0d85764
Linus Torvalds 2025-08-12 08:19:23 -07:00

6 changed files with 62 additions and 0 deletions

@@ -106,5 +106,18 @@ void get_cpuflags(void)
 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
 			      &cpu.flags[1]);
 		}
+
+		if (max_amd_level >= 0x8000001f) {
+			u32 ebx;
+
+			/*
+			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+			 * the virtualization flags entry (word 8) and set by
+			 * scattered.c, so the bit needs to be explicitly set.
+			 */
+			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+			if (ebx & BIT(31))
+				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+		}
 	}
 }
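
For illustration only (not part of the patch): the same CPUID bit can be
probed from user space with the compiler's <cpuid.h> helpers, which return
0 when the extended leaf is out of range:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x8000001f is AMD's SEV capability leaf. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not supported");
		return 1;
	}

	/* EBX bit 31 is the COHERENCY_SFW_NO indication checked above. */
	printf("cache eviction workaround needed: %s\n",
	       (ebx & (1u << 31)) ? "no" : "yes");
	return 0;
}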

@@ -810,6 +810,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
 		if (ret)
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 	}
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)vaddr, 1);
 }
 
 /*
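
Note that this early boot path tests the feature with has_cpuflag(), i.e.
against the cpu.flags words that get_cpuflags() populated in the first
hunk; cpu_feature_enabled() and the scattered.c machinery are not
available this early, which is why the boot code sets the bit explicitly.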

@@ -358,10 +358,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
 
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
+	struct psc_entry *e;
+	unsigned int i;
+
 	if (snp_vmpl)
 		svsm_pval_pages(desc);
 	else
 		pval_pages(desc);
+
+	/*
+	 * If not affected by the cache-coherency vulnerability there is no need
+	 * to perform the cache eviction mitigation.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		return;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		/*
+		 * If validating memory (making it private) perform the cache
+		 * eviction mitigation.
+		 */
+		if (e->operation == SNP_PAGE_STATE_PRIVATE)
+			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+	}
 }
 
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
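
The npages argument follows from the RMP page size: when e->pagesize
indicates a 2 MiB mapping, the region spans 2 MiB / 4 KiB = 512 base
pages and sev_evict_cache() touches each of them; otherwise a single
4K page is handled.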

@@ -218,6 +218,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 2) /* "ept" Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* Xen paravirtual guest */

@@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
 void kdump_sev_callback(void);
 void snp_fixup_e820_tables(void);
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 static inline void kdump_sev_callback(void) { }
 static inline void snp_fixup_e820_tables(void) {}
+static inline void sev_evict_cache(void *va, int npages) {}
 #endif
 
 #endif
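
To make the calling convention concrete, a hedged sketch of a caller in
the same style as the hunks above (the helper name snp_evict_after_validate
is hypothetical, not part of the patch):

/*
 * Hypothetical helper: after converting a region to guest-private,
 * perform the eviction mitigation unless the CPU reports it unneeded.
 */
static void snp_evict_after_validate(void *va, bool huge_page)
{
	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
		return;

	/* A 2 MiB page covers 512 4K pages; otherwise evict one page. */
	sev_evict_cache(va, huge_page ? 512 : 1);
}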

@@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_AMD_FAST_CPPC,	CPUID_EDX, 15, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+	{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
 	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },
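
Each cpuid_bits[] entry maps a scattered CPUID bit to a synthetic feature
word: the feature number, the CPUID output register, the bit within that
register, the leaf, and the sub-leaf. For orientation, the entry layout in
scattered.c looks roughly like this (from recent kernels):

struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
	u32 sub_leaf;
};

The new entry thus reads: set X86_FEATURE_COHERENCY_SFW_NO when bit 31 of
EBX from CPUID leaf 0x8000001f is set, matching the open-coded check in
the boot stub's first hunk.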