Skip site navigation (1)Skip section navigation (2)
Date:      Wed, 15 May 2019 10:24:57 +0000 (UTC)
From:      =?UTF-8?Q?Roger_Pau_Monn=c3=a9?= <royger@FreeBSD.org>
To:        ports-committers@freebsd.org, svn-ports-all@freebsd.org, svn-ports-head@freebsd.org
Subject:   svn commit: r501706 - in head: emulators/xen-kernel emulators/xen-kernel/files sysutils/xen-tools sysutils/xen-tools/files
Message-ID:  <201905151024.x4FAOvbq015331@repo.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: royger (src committer)
Date: Wed May 15 10:24:57 2019
New Revision: 501706
URL: https://svnweb.freebsd.org/changeset/ports/501706

Log:
  xen: add fixes for XSA-297 (MDS)
  
  Sponsored by:		Citrix Systems R&D
  Reviewed by:		mat
  Differential revision:	https://reviews.freebsd.org/D20265

Added:
  head/emulators/xen-kernel/files/
  head/emulators/xen-kernel/files/0001-x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch   (contents, props changed)
  head/emulators/xen-kernel/files/0001-x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch   (contents, props changed)
  head/emulators/xen-kernel/files/0002-x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-1.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-2.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-3.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-4.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-5.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-6.patch   (contents, props changed)
  head/emulators/xen-kernel/files/xsa297-4.12-7.patch   (contents, props changed)
  head/sysutils/xen-tools/files/0001-x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch   (contents, props changed)
  head/sysutils/xen-tools/files/0001-x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch   (contents, props changed)
  head/sysutils/xen-tools/files/0002-x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-1.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-2.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-3.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-4.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-5.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-6.patch   (contents, props changed)
  head/sysutils/xen-tools/files/xsa297-4.12-7.patch   (contents, props changed)
Modified:
  head/emulators/xen-kernel/Makefile
  head/sysutils/xen-tools/Makefile

Modified: head/emulators/xen-kernel/Makefile
==============================================================================
--- head/emulators/xen-kernel/Makefile	Wed May 15 10:02:37 2019	(r501705)
+++ head/emulators/xen-kernel/Makefile	Wed May 15 10:24:57 2019	(r501706)
@@ -2,7 +2,7 @@
 
 PORTNAME=	xen
 PORTVERSION=	4.12.0
-PORTREVISION=	0
+PORTREVISION=	1
 CATEGORIES=	emulators
 MASTER_SITES=	http://downloads.xenproject.org/release/xen/${PORTVERSION}/
 PKGNAMESUFFIX=	-kernel
@@ -23,6 +23,20 @@ NO_MTREE=	yes
 STRIP=		#
 PLIST_FILES=	/boot/xen \
 		lib/debug/boot/xen.debug
+
+# Pre-patches for XSA-297 to apply cleanly
+EXTRA_PATCHES+=	${PATCHDIR}/0001-x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch:-p1 \
+		${PATCHDIR}/0001-x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch:-p1 \
+		${PATCHDIR}/0002-x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch:-p1
+
+# Fixes for XSA-297 (MDS)
+EXTRA_PATCHES+=	${PATCHDIR}/xsa297-4.12-1.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-2.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-3.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-4.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-5.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-6.patch:-p1 \
+		${PATCHDIR}/xsa297-4.12-7.patch:-p1
 
 .include <bsd.port.options.mk>
 

Added: head/emulators/xen-kernel/files/0001-x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/0001-x86-msr-Shorten-ARCH_CAPABILITIES_-constants.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,71 @@
+From e25d1338e1d8a32e46c808321323c4ad8fc5ba01 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:44:58 +0200
+Subject: [PATCH 1/2] x86/msr: Shorten ARCH_CAPABILITIES_* constants
+
+They are unnecessarily verbose, and ARCH_CAPS_* is already the more common
+version.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+master commit: ba27aaa88548c824a47dcf5609288ee1c05d2946
+master date: 2019-03-18 16:26:40 +0000
+---
+ xen/arch/x86/spec_ctrl.c        | 10 +++++-----
+ xen/include/asm-x86/msr-index.h |  4 ++--
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index ad72ecd3a5..22bfc5a5e8 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -230,8 +230,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
+            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
+            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
+-           (caps & ARCH_CAPABILITIES_IBRS_ALL)      ? " IBRS_ALL"  : "",
+-           (caps & ARCH_CAPABILITIES_RDCL_NO)       ? " RDCL_NO"   : "",
++           (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
++           (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
+            (caps & ARCH_CAPS_RSBA)                  ? " RSBA"      : "",
+            (caps & ARCH_CAPS_SKIP_L1DFL)            ? " SKIP_L1DFL": "",
+            (caps & ARCH_CAPS_SSB_NO)                ? " SSB_NO"    : "");
+@@ -549,7 +549,7 @@ static __init void l1tf_calculations(uint64_t caps)
+     }
+ 
+     /* Any processor advertising RDCL_NO should be not vulnerable to L1TF. */
+-    if ( caps & ARCH_CAPABILITIES_RDCL_NO )
++    if ( caps & ARCH_CAPS_RDCL_NO )
+         cpu_has_bug_l1tf = false;
+ 
+     if ( cpu_has_bug_l1tf && hit_default )
+@@ -613,9 +613,9 @@ int8_t __read_mostly opt_xpti_domu = -1;
+ static __init void xpti_init_default(uint64_t caps)
+ {
+     if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+-        caps = ARCH_CAPABILITIES_RDCL_NO;
++        caps = ARCH_CAPS_RDCL_NO;
+ 
+-    if ( caps & ARCH_CAPABILITIES_RDCL_NO )
++    if ( caps & ARCH_CAPS_RDCL_NO )
+     {
+         if ( opt_xpti_hwdom < 0 )
+             opt_xpti_hwdom = 0;
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index c6e1d8768f..11512d4250 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -42,8 +42,8 @@
+ #define PRED_CMD_IBPB			(_AC(1, ULL) << 0)
+ 
+ #define MSR_ARCH_CAPABILITIES		0x0000010a
+-#define ARCH_CAPABILITIES_RDCL_NO	(_AC(1, ULL) << 0)
+-#define ARCH_CAPABILITIES_IBRS_ALL	(_AC(1, ULL) << 1)
++#define ARCH_CAPS_RDCL_NO		(_AC(1, ULL) << 0)
++#define ARCH_CAPS_IBRS_ALL		(_AC(1, ULL) << 1)
+ #define ARCH_CAPS_RSBA			(_AC(1, ULL) << 2)
+ #define ARCH_CAPS_SKIP_L1DFL		(_AC(1, ULL) << 3)
+ #define ARCH_CAPS_SSB_NO		(_AC(1, ULL) << 4)
+-- 
+2.17.2 (Apple Git-113)
+

Added: head/emulators/xen-kernel/files/0001-x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/0001-x86-tsx-Implement-controls-for-RTM-force-abort-mode.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,185 @@
+From e3a1ebee98e61dfe3b8b1eb71043ad5220b3cfd3 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:39:29 +0200
+Subject: [PATCH] x86/tsx: Implement controls for RTM force-abort mode
+
+The CPUID bit and MSR are deliberately not exposed to guests, because they
+won't exist on newer processors.  As vPMU isn't security supported, the
+misbehaviour of PCR3 isn't expected to impact production deployments.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+master commit: 6be613f29b4205349275d24367bd4c82fb2960dd
+master date: 2019-03-12 17:05:21 +0000
+---
+ docs/misc/xen-command-line.pandoc           | 17 ++++++++++++++++-
+ tools/misc/xen-cpuid.c                      |  2 ++
+ xen/arch/x86/cpu/intel.c                    |  3 +++
+ xen/arch/x86/cpu/vpmu.c                     |  3 +++
+ xen/arch/x86/msr.c                          |  4 ++++
+ xen/include/asm-x86/cpufeature.h            |  3 +++
+ xen/include/asm-x86/msr-index.h             |  3 +++
+ xen/include/asm-x86/vpmu.h                  |  1 +
+ xen/include/public/arch-x86/cpufeatureset.h |  1 +
+ 9 files changed, 36 insertions(+), 1 deletion(-)
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index 742555616d..6db82f302e 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -2109,7 +2109,7 @@ Use Virtual Processor ID support if available.  This prevents the need for TLB
+ flushes on VM entry and exit, increasing performance.
+ 
+ ### vpmu (x86)
+-    = List of [ <bool>, bts, ipc, arch ]
++    = List of [ <bool>, bts, ipc, arch, rtm-abort=<bool> ]
+ 
+     Applicability: x86.  Default: false
+ 
+@@ -2142,6 +2142,21 @@ provide access to a wealth of low level processor information.
+ 
+ *   The `arch` option allows access to the pre-defined architectural events.
+ 
++*   The `rtm-abort` boolean controls a trade-off between working Restricted
++    Transactional Memory, and working performance counters.
++
++    All processors released to date (Q1 2019) supporting Transactional Memory
++    Extensions suffer an erratum which has been addressed in microcode.
++
++    Processors based on the Skylake microarchitecture with up-to-date
++    microcode internally use performance counter 3 to work around the erratum.
++    A consequence is that the counter gets reprogrammed whenever an `XBEGIN`
++    instruction is executed.
++
++    An alternative mode exists where PCR3 behaves as before, at the cost of
++    `XBEGIN` unconditionally aborting.  Enabling `rtm-abort` mode will
++    activate this alternative mode.
++
+ *Warning:*
+ As the virtualisation is not 100% safe, don't use the vpmu flag on
+ production systems (see http://xenbits.xen.org/xsa/advisory-163.html)!
+diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
+index 6e7ca8b9a4..d87a72e3e6 100644
+--- a/tools/misc/xen-cpuid.c
++++ b/tools/misc/xen-cpuid.c
+@@ -146,6 +146,8 @@ static const char *str_7d0[32] =
+ {
+     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
+ 
++    /* 12 */                [13] = "tsx-force-abort",
++
+     [26] = "ibrsb",         [27] = "stibp",
+     [28] = "l1d_flush",     [29] = "arch_caps",
+     /* 30 */                [31] = "ssbd",
+diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
+index 65fa3d611f..29c6b87512 100644
+--- a/xen/arch/x86/cpu/intel.c
++++ b/xen/arch/x86/cpu/intel.c
+@@ -286,6 +286,9 @@ static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
+ 	if (c->x86 == 6 && cpu_has_clflush &&
+ 	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ 		__set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
++
++	if (cpu_has_tsx_force_abort && opt_rtm_abort)
++		wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
+ }
+ 
+ 
+diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
+index 8324d62f11..8f6daf13fd 100644
+--- a/xen/arch/x86/cpu/vpmu.c
++++ b/xen/arch/x86/cpu/vpmu.c
+@@ -45,6 +45,7 @@ CHECK_pmu_params;
+ static unsigned int __read_mostly opt_vpmu_enabled;
+ unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
+ unsigned int __read_mostly vpmu_features = 0;
++bool __read_mostly opt_rtm_abort;
+ 
+ static DEFINE_SPINLOCK(vpmu_lock);
+ static unsigned vpmu_count;
+@@ -73,6 +74,8 @@ static int __init parse_vpmu_params(const char *s)
+             vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
+         else if ( !cmdline_strcmp(s, "arch") )
+             vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
++        else if ( (val = parse_boolean("rtm-abort", s, ss)) >= 0 )
++            opt_rtm_abort = val;
+         else
+             rc = -EINVAL;
+ 
+diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
+index 9bb38b6d66..4df4a59f4d 100644
+--- a/xen/arch/x86/msr.c
++++ b/xen/arch/x86/msr.c
+@@ -131,6 +131,8 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+     case MSR_PRED_CMD:
+     case MSR_FLUSH_CMD:
+         /* Write-only */
++    case MSR_TSX_FORCE_ABORT:
++        /* Not offered to guests. */
+         goto gp_fault;
+ 
+     case MSR_SPEC_CTRL:
+@@ -230,6 +232,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+     case MSR_INTEL_PLATFORM_INFO:
+     case MSR_ARCH_CAPABILITIES:
+         /* Read-only */
++    case MSR_TSX_FORCE_ABORT:
++        /* Not offered to guests. */
+         goto gp_fault;
+ 
+     case MSR_AMD_PATCHLOADER:
+diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
+index 1fb9af4b19..745801f3c0 100644
+--- a/xen/include/asm-x86/cpufeature.h
++++ b/xen/include/asm-x86/cpufeature.h
+@@ -112,6 +112,9 @@
+ /* CPUID level 0x80000007.edx */
+ #define cpu_has_itsc            boot_cpu_has(X86_FEATURE_ITSC)
+ 
++/* CPUID level 0x00000007:0.edx */
++#define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)
++
+ /* Synthesized. */
+ #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+ #define cpu_has_cpuid_faulting  boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 24d783a72d..c6e1d8768f 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -51,6 +51,9 @@
+ #define MSR_FLUSH_CMD			0x0000010b
+ #define FLUSH_CMD_L1D			(_AC(1, ULL) << 0)
+ 
++#define MSR_TSX_FORCE_ABORT             0x0000010f
++#define TSX_FORCE_ABORT_RTM             (_AC(1, ULL) <<  0)
++
+ /* Intel MSRs. Some also available on other CPUs */
+ #define MSR_IA32_PERFCTR0		0x000000c1
+ #define MSR_IA32_A_PERFCTR0		0x000004c1
+diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
+index 5e778ab7ba..1287b9fb6e 100644
+--- a/xen/include/asm-x86/vpmu.h
++++ b/xen/include/asm-x86/vpmu.h
+@@ -125,6 +125,7 @@ static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+ 
+ extern unsigned int vpmu_mode;
+ extern unsigned int vpmu_features;
++extern bool opt_rtm_abort;
+ 
+ /* Context switch */
+ static inline void vpmu_switch_from(struct vcpu *prev)
+diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
+index fbc68fa29f..2bcc5487ac 100644
+--- a/xen/include/public/arch-x86/cpufeatureset.h
++++ b/xen/include/public/arch-x86/cpufeatureset.h
+@@ -242,6 +242,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /*A  IBPB support only (no IBRS, used by
+ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
+ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions */
+ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
++XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
+ XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
+ XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */
+ XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*S  MSR_FLUSH_CMD and L1D flush. */
+-- 
+2.17.2 (Apple Git-113)
+

Added: head/emulators/xen-kernel/files/0002-x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/0002-x86-spec-ctrl-Extend-repoline-safey-calcuations-for-.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,68 @@
+From fd2a34c9655acecaaa1541dd84fc670936303175 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 3 May 2019 10:45:45 +0200
+Subject: [PATCH 2/2] x86/spec-ctrl: Extend repoline safey calcuations for
+ eIBRS and Atom parts
+
+All currently-released Atom processors are in practice retpoline-safe, because
+they don't fall back to a BTB prediction on RSB underflow.
+
+However, an additional meaning of Enhanced IBRS is that the processor may not
+be retpoline-safe.  The Gemini Lake platform, based on the Goldmont Plus
+microarchitecture is the first Atom processor to support eIBRS.
+
+Until Xen gets full eIBRS support, Gemini Lake will still be safe using
+regular IBRS.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+master commit: 17f74242ccf0ce6e51c03a5860947865c0ef0dc2
+master date: 2019-03-18 16:26:40 +0000
+---
+ xen/arch/x86/spec_ctrl.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 22bfc5a5e8..1171c02ab1 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -316,8 +316,11 @@ static bool __init retpoline_safe(uint64_t caps)
+     /*
+      * RSBA may be set by a hypervisor to indicate that we may move to a
+      * processor which isn't retpoline-safe.
++     *
++     * Processors offering Enhanced IBRS are not guarenteed to be
++     * repoline-safe.
+      */
+-    if ( caps & ARCH_CAPS_RSBA )
++    if ( caps & (ARCH_CAPS_RSBA | ARCH_CAPS_IBRS_ALL) )
+         return false;
+ 
+     switch ( boot_cpu_data.x86_model )
+@@ -377,6 +380,23 @@ static bool __init retpoline_safe(uint64_t caps)
+     case 0x9e:
+         return false;
+ 
++        /*
++         * Atom processors before Goldmont Plus/Gemini Lake are retpoline-safe.
++         */
++    case 0x1c: /* Pineview */
++    case 0x26: /* Lincroft */
++    case 0x27: /* Penwell */
++    case 0x35: /* Cloverview */
++    case 0x36: /* Cedarview */
++    case 0x37: /* Baytrail / Valleyview (Silvermont) */
++    case 0x4d: /* Avaton / Rangely (Silvermont) */
++    case 0x4c: /* Cherrytrail / Brasswell */
++    case 0x4a: /* Merrifield */
++    case 0x5a: /* Moorefield */
++    case 0x5c: /* Goldmont */
++    case 0x5f: /* Denverton */
++        return true;
++
+     default:
+         printk("Unrecognised CPU model %#x - assuming not reptpoline safe\n",
+                boot_cpu_data.x86_model);
+-- 
+2.17.2 (Apple Git-113)
+

Added: head/emulators/xen-kernel/files/xsa297-4.12-1.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-1.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,163 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Reposition the XPTI command line parsing logic
+
+It has ended up in the middle of the mitigation calculation logic.  Move it to
+be beside the other command line parsing.
+
+No functional change.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 1171c02..99310c8 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -167,6 +167,73 @@ static int __init parse_spec_ctrl(const char *s)
+ }
+ custom_param("spec-ctrl", parse_spec_ctrl);
+ 
++int8_t __read_mostly opt_xpti_hwdom = -1;
++int8_t __read_mostly opt_xpti_domu = -1;
++
++static __init void xpti_init_default(uint64_t caps)
++{
++    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
++        caps = ARCH_CAPS_RDCL_NO;
++
++    if ( caps & ARCH_CAPS_RDCL_NO )
++    {
++        if ( opt_xpti_hwdom < 0 )
++            opt_xpti_hwdom = 0;
++        if ( opt_xpti_domu < 0 )
++            opt_xpti_domu = 0;
++    }
++    else
++    {
++        if ( opt_xpti_hwdom < 0 )
++            opt_xpti_hwdom = 1;
++        if ( opt_xpti_domu < 0 )
++            opt_xpti_domu = 1;
++    }
++}
++
++static __init int parse_xpti(const char *s)
++{
++    const char *ss;
++    int val, rc = 0;
++
++    /* Interpret 'xpti' alone in its positive boolean form. */
++    if ( *s == '\0' )
++        opt_xpti_hwdom = opt_xpti_domu = 1;
++
++    do {
++        ss = strchr(s, ',');
++        if ( !ss )
++            ss = strchr(s, '\0');
++
++        switch ( parse_bool(s, ss) )
++        {
++        case 0:
++            opt_xpti_hwdom = opt_xpti_domu = 0;
++            break;
++
++        case 1:
++            opt_xpti_hwdom = opt_xpti_domu = 1;
++            break;
++
++        default:
++            if ( !strcmp(s, "default") )
++                opt_xpti_hwdom = opt_xpti_domu = -1;
++            else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
++                opt_xpti_hwdom = val;
++            else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
++                opt_xpti_domu = val;
++            else if ( *s )
++                rc = -EINVAL;
++            break;
++        }
++
++        s = ss + 1;
++    } while ( *ss );
++
++    return rc;
++}
++custom_param("xpti", parse_xpti);
++
+ int8_t __read_mostly opt_pv_l1tf_hwdom = -1;
+ int8_t __read_mostly opt_pv_l1tf_domu = -1;
+ 
+@@ -627,73 +694,6 @@ static __init void l1tf_calculations(uint64_t caps)
+                                             : (3ul << (paddr_bits - 2))));
+ }
+ 
+-int8_t __read_mostly opt_xpti_hwdom = -1;
+-int8_t __read_mostly opt_xpti_domu = -1;
+-
+-static __init void xpti_init_default(uint64_t caps)
+-{
+-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+-        caps = ARCH_CAPS_RDCL_NO;
+-
+-    if ( caps & ARCH_CAPS_RDCL_NO )
+-    {
+-        if ( opt_xpti_hwdom < 0 )
+-            opt_xpti_hwdom = 0;
+-        if ( opt_xpti_domu < 0 )
+-            opt_xpti_domu = 0;
+-    }
+-    else
+-    {
+-        if ( opt_xpti_hwdom < 0 )
+-            opt_xpti_hwdom = 1;
+-        if ( opt_xpti_domu < 0 )
+-            opt_xpti_domu = 1;
+-    }
+-}
+-
+-static __init int parse_xpti(const char *s)
+-{
+-    const char *ss;
+-    int val, rc = 0;
+-
+-    /* Interpret 'xpti' alone in its positive boolean form. */
+-    if ( *s == '\0' )
+-        opt_xpti_hwdom = opt_xpti_domu = 1;
+-
+-    do {
+-        ss = strchr(s, ',');
+-        if ( !ss )
+-            ss = strchr(s, '\0');
+-
+-        switch ( parse_bool(s, ss) )
+-        {
+-        case 0:
+-            opt_xpti_hwdom = opt_xpti_domu = 0;
+-            break;
+-
+-        case 1:
+-            opt_xpti_hwdom = opt_xpti_domu = 1;
+-            break;
+-
+-        default:
+-            if ( !strcmp(s, "default") )
+-                opt_xpti_hwdom = opt_xpti_domu = -1;
+-            else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
+-                opt_xpti_hwdom = val;
+-            else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
+-                opt_xpti_domu = val;
+-            else if ( *s )
+-                rc = -EINVAL;
+-            break;
+-        }
+-
+-        s = ss + 1;
+-    } while ( *ss );
+-
+-    return rc;
+-}
+-custom_param("xpti", parse_xpti);
+-
+ void __init init_speculation_mitigations(void)
+ {
+     enum ind_thunk thunk = THUNK_DEFAULT;

Added: head/emulators/xen-kernel/files/xsa297-4.12-2.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-2.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,54 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/msr: Definitions for MSR_INTEL_CORE_THREAD_COUNT
+
+This is a model specific register which details the current configuration of
+cores and threads in the package.  Because of how Hyperthread and Core
+configuration works in firmware, the MSR is de-facto constant and
+will remain unchanged until the next system reset.
+
+It is a read only MSR (so unilaterally reject writes), but for now retain its
+leaky-on-read properties.  Further CPUID/MSR work is required before we can
+start virtualising a consistent topology to the guest, and retaining the old
+behaviour is the safest course of action.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
+index 4df4a59..a7f67d9 100644
+--- a/xen/arch/x86/msr.c
++++ b/xen/arch/x86/msr.c
+@@ -200,6 +200,10 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+                                    ARRAY_SIZE(msrs->dr_mask))];
+         break;
+ 
++        /*
++         * TODO: Implement when we have better topology representation.
++    case MSR_INTEL_CORE_THREAD_COUNT:
++         */
+     default:
+         return X86EMUL_UNHANDLEABLE;
+     }
+@@ -229,6 +233,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+     {
+         uint64_t rsvd;
+ 
++    case MSR_INTEL_CORE_THREAD_COUNT:
+     case MSR_INTEL_PLATFORM_INFO:
+     case MSR_ARCH_CAPABILITIES:
+         /* Read-only */
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 11512d4..389f95f 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -32,6 +32,10 @@
+ #define EFER_KNOWN_MASK		(EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
+ 				 EFER_SVME | EFER_FFXSE)
+ 
++#define MSR_INTEL_CORE_THREAD_COUNT     0x00000035
++#define MSR_CTC_THREAD_MASK             0x0000ffff
++#define MSR_CTC_CORE_MASK               0xffff0000
++
+ /* Speculation Controls. */
+ #define MSR_SPEC_CTRL			0x00000048
+ #define SPEC_CTRL_IBRS			(_AC(1, ULL) << 0)

Added: head/emulators/xen-kernel/files/xsa297-4.12-3.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-3.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,109 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/boot: Detect the firmware SMT setting correctly on Intel hardware
+
+While boot_cpu_data.x86_num_siblings is an accurate value to use on AMD
+hardware, it isn't on Intel when the user has disabled Hyperthreading in the
+firmware.  As a result, a user which has chosen to disable HT still gets
+nagged on L1TF-vulnerable hardware when they haven't chosen an explicit
+smt=<bool> setting.
+
+Make use of the largely-undocumented MSR_INTEL_CORE_THREAD_COUNT which in
+practice exists since Nehalem, when booting on real hardware.  Fall back to
+using the ACPI table APIC IDs.
+
+While adjusting this logic, fix a latent bug in amd_get_topology().  The
+thread count field in CPUID.0x8000001e.ebx is documented as 8 bits wide,
+rather than 2 bits wide.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
+index c790416..b1debac 100644
+--- a/xen/arch/x86/cpu/amd.c
++++ b/xen/arch/x86/cpu/amd.c
+@@ -507,7 +507,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
+                 u32 eax, ebx, ecx, edx;
+ 
+                 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+-                c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
++                c->x86_num_siblings = ((ebx >> 8) & 0xff) + 1;
+ 
+                 if (c->x86 < 0x17)
+                         c->compute_unit_id = ebx & 0xFF;
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index 99310c8..e49ab3f 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -368,6 +368,45 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+ #endif
+ }
+ 
++static bool __init check_smt_enabled(void)
++{
++    uint64_t val;
++    unsigned int cpu;
++
++    /*
++     * x86_num_siblings defaults to 1 in the absence of other information, and
++     * is adjusted based on other topology information found in CPUID leaves.
++     *
++     * On AMD hardware, it will be the current SMT configuration.  On Intel
++     * hardware, it will represent the maximum capability, rather than the
++     * current configuration.
++     */
++    if ( boot_cpu_data.x86_num_siblings < 2 )
++        return false;
++
++    /*
++     * Intel Nehalem and later hardware does have an MSR which reports the
++     * current count of cores/threads in the package.
++     *
++     * At the time of writing, it is almost completely undocumented, so isn't
++     * virtualised reliably.
++     */
++    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && !cpu_has_hypervisor &&
++         !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, val) )
++        return (MASK_EXTR(val, MSR_CTC_CORE_MASK) !=
++                MASK_EXTR(val, MSR_CTC_THREAD_MASK));
++
++    /*
++     * Search over the CPUs reported in the ACPI tables.  Any whose APIC ID
++     * has a non-zero thread id component indicates that SMT is active.
++     */
++    for_each_present_cpu ( cpu )
++        if ( x86_cpu_to_apicid[cpu] & (boot_cpu_data.x86_num_siblings - 1) )
++            return true;
++
++    return false;
++}
++
+ /* Calculate whether Retpoline is known-safe on this CPU. */
+ static bool __init retpoline_safe(uint64_t caps)
+ {
+@@ -697,12 +736,14 @@ static __init void l1tf_calculations(uint64_t caps)
+ void __init init_speculation_mitigations(void)
+ {
+     enum ind_thunk thunk = THUNK_DEFAULT;
+-    bool use_spec_ctrl = false, ibrs = false;
++    bool use_spec_ctrl = false, ibrs = false, hw_smt_enabled;
+     uint64_t caps = 0;
+ 
+     if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
+         rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+ 
++    hw_smt_enabled = check_smt_enabled();
++
+     /*
+      * Has the user specified any custom BTI mitigations?  If so, follow their
+      * instructions exactly and disable all heuristics.
+@@ -873,8 +914,7 @@ void __init init_speculation_mitigations(void)
+      * However, if we are on affected hardware, with HT enabled, and the user
+      * hasn't explicitly chosen whether to use HT or not, nag them to do so.
+      */
+-    if ( opt_smt == -1 && cpu_has_bug_l1tf && !pv_shim &&
+-         boot_cpu_data.x86_num_siblings > 1 )
++    if ( opt_smt == -1 && cpu_has_bug_l1tf && !pv_shim && hw_smt_enabled )
+         warning_add(
+             "Booted on L1TF-vulnerable hardware with SMT/Hyperthreading\n"
+             "enabled.  Please assess your configuration and choose an\n"

Added: head/emulators/xen-kernel/files/xsa297-4.12-4.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-4.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,55 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Misc non-functional cleanup
+
+ * Identify BTI in the spec_ctrl_{enter,exit}_idle() comments, as other
+   mitigations will shortly appear.
+ * Use alternative_input() and cover the lack of memory clobber with a further
+   barrier.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
+index 779da2b..20ee112 100644
+--- a/xen/include/asm-x86/spec_ctrl.h
++++ b/xen/include/asm-x86/spec_ctrl.h
+@@ -68,6 +68,8 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+     uint32_t val = 0;
+ 
+     /*
++     * Branch Target Injection:
++     *
+      * Latch the new shadow value, then enable shadowing, then update the MSR.
+      * There are no SMP issues here; only local processor ordering concerns.
+      */
+@@ -75,8 +77,9 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+     barrier();
+     info->spec_ctrl_flags |= SCF_use_shadow;
+     barrier();
+-    asm volatile ( ALTERNATIVE("", "wrmsr", X86_FEATURE_SC_MSR_IDLE)
+-                   :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
++    alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
++                      "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
++    barrier();
+ }
+ 
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
+@@ -85,13 +88,16 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
+     uint32_t val = info->xen_spec_ctrl;
+ 
+     /*
++     * Branch Target Injection:
++     *
+      * Disable shadowing before updating the MSR.  There are no SMP issues
+      * here; only local processor ordering concerns.
+      */
+     info->spec_ctrl_flags &= ~SCF_use_shadow;
+     barrier();
+-    asm volatile ( ALTERNATIVE("", "wrmsr", X86_FEATURE_SC_MSR_IDLE)
+-                   :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
++    alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
++                      "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
++    barrier();
+ }
+ 
+ #endif /* __ASSEMBLY__ */

Added: head/emulators/xen-kernel/files/xsa297-4.12-5.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-5.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,147 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: CPUID/MSR definitions for Microarchitectural Data
+ Sampling
+
+The MD_CLEAR feature can be automatically offered to guests.  No
+infrastructure is needed in Xen to support the guest making use of it.
+
+This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index 6db82f3..f80d8d8 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -483,7 +483,7 @@ accounting for hardware capabilities as enumerated via CPUID.
+ 
+ Currently accepted:
+ 
+-The Speculation Control hardware features `ibrsb`, `stibp`, `ibpb`,
++The Speculation Control hardware features `md-clear`, `ibrsb`, `stibp`, `ibpb`,
+ `l1d-flush` and `ssbd` are used by default if available and applicable.  They can
+ be ignored, e.g. `no-ibrsb`, at which point Xen won't use them itself, and
+ won't offer them to guests.
+diff --git a/tools/libxl/libxl_cpuid.c b/tools/libxl/libxl_cpuid.c
+index 52e16c2..5a1702d 100644
+--- a/tools/libxl/libxl_cpuid.c
++++ b/tools/libxl/libxl_cpuid.c
+@@ -202,6 +202,7 @@ int libxl_cpuid_parse_config(libxl_cpuid_policy_list *cpuid, const char* str)
+ 
+         {"avx512-4vnniw",0x00000007,  0, CPUID_REG_EDX,  2,  1},
+         {"avx512-4fmaps",0x00000007,  0, CPUID_REG_EDX,  3,  1},
++        {"md-clear",     0x00000007,  0, CPUID_REG_EDX, 10,  1},
+         {"ibrsb",        0x00000007,  0, CPUID_REG_EDX, 26,  1},
+         {"stibp",        0x00000007,  0, CPUID_REG_EDX, 27,  1},
+         {"l1d-flush",    0x00000007,  0, CPUID_REG_EDX, 28,  1},
+diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
+index d87a72e..f67ecd3 100644
+--- a/tools/misc/xen-cpuid.c
++++ b/tools/misc/xen-cpuid.c
+@@ -146,6 +146,7 @@ static const char *str_7d0[32] =
+ {
+     [ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
+ 
++    [10] = "md-clear",
+     /* 12 */                [13] = "tsx-force-abort",
+ 
+     [26] = "ibrsb",         [27] = "stibp",
+diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
+index ab0aab6..3efad9c 100644
+--- a/xen/arch/x86/cpuid.c
++++ b/xen/arch/x86/cpuid.c
+@@ -29,7 +29,12 @@ static int __init parse_xen_cpuid(const char *s)
+         if ( !ss )
+             ss = strchr(s, '\0');
+ 
+-        if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
++        if ( (val = parse_boolean("md-clear", s, ss)) >= 0 )
++        {
++            if ( !val )
++                setup_clear_cpu_cap(X86_FEATURE_MD_CLEAR);
++        }
++        else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
+         {
+             if ( !val )
+                 setup_clear_cpu_cap(X86_FEATURE_IBPB);
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index e49ab3f..a573b02 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -291,17 +291,19 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+     printk("Speculative mitigation facilities:\n");
+ 
+     /* Hardware features which pertain to speculative mitigations. */
+-    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s\n",
++    printk("  Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+            (_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS/IBPB" : "",
+            (_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP"     : "",
+            (_7d0 & cpufeat_mask(X86_FEATURE_L1D_FLUSH)) ? " L1D_FLUSH" : "",
+            (_7d0 & cpufeat_mask(X86_FEATURE_SSBD))  ? " SSBD"      : "",
++           (_7d0 & cpufeat_mask(X86_FEATURE_MD_CLEAR)) ? " MD_CLEAR" : "",
+            (e8b  & cpufeat_mask(X86_FEATURE_IBPB))  ? " IBPB"      : "",
+            (caps & ARCH_CAPS_IBRS_ALL)              ? " IBRS_ALL"  : "",
+            (caps & ARCH_CAPS_RDCL_NO)               ? " RDCL_NO"   : "",
+            (caps & ARCH_CAPS_RSBA)                  ? " RSBA"      : "",
+            (caps & ARCH_CAPS_SKIP_L1DFL)            ? " SKIP_L1DFL": "",
+-           (caps & ARCH_CAPS_SSB_NO)                ? " SSB_NO"    : "");
++           (caps & ARCH_CAPS_SSB_NO)                ? " SSB_NO"    : "",
++           (caps & ARCH_CAPS_MDS_NO)                ? " MDS_NO"    : "");
+ 
+     /* Compiled-in support which pertains to mitigations. */
+     if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) || IS_ENABLED(CONFIG_SHADOW_PAGING) )
+@@ -339,23 +341,25 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+      * mitigation support for guests.
+      */
+ #ifdef CONFIG_HVM
+-    printk("  Support for HVM VMs:%s%s%s%s\n",
++    printk("  Support for HVM VMs:%s%s%s%s%s\n",
+            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
+             boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
+             opt_eager_fpu)                           ? ""               : " None",
+            boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_SPEC_CTRL" : "",
+            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB"           : "",
+-           opt_eager_fpu                             ? " EAGER_FPU"     : "");
++           opt_eager_fpu                             ? " EAGER_FPU"     : "",
++           boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
+ 
+ #endif
+ #ifdef CONFIG_PV
+-    printk("  Support for PV VMs:%s%s%s%s\n",
++    printk("  Support for PV VMs:%s%s%s%s%s\n",
+            (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
+             boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
+             opt_eager_fpu)                           ? ""               : " None",
+            boot_cpu_has(X86_FEATURE_SC_MSR_PV)       ? " MSR_SPEC_CTRL" : "",
+            boot_cpu_has(X86_FEATURE_SC_RSB_PV)       ? " RSB"           : "",
+-           opt_eager_fpu                             ? " EAGER_FPU"     : "");
++           opt_eager_fpu                             ? " EAGER_FPU"     : "",
++           boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
+ 
+     printk("  XPTI (64-bit PV only): Dom0 %s, DomU %s (with%s PCID)\n",
+            opt_xpti_hwdom ? "enabled" : "disabled",
+diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
+index 389f95f..637259b 100644
+--- a/xen/include/asm-x86/msr-index.h
++++ b/xen/include/asm-x86/msr-index.h
+@@ -51,6 +51,7 @@
+ #define ARCH_CAPS_RSBA			(_AC(1, ULL) << 2)
+ #define ARCH_CAPS_SKIP_L1DFL		(_AC(1, ULL) << 3)
+ #define ARCH_CAPS_SSB_NO		(_AC(1, ULL) << 4)
++#define ARCH_CAPS_MDS_NO		(_AC(1, ULL) << 5)
+ 
+ #define MSR_FLUSH_CMD			0x0000010b
+ #define FLUSH_CMD_L1D			(_AC(1, ULL) << 0)
+diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
+index 2bcc548..55231d4 100644
+--- a/xen/include/public/arch-x86/cpufeatureset.h
++++ b/xen/include/public/arch-x86/cpufeatureset.h
+@@ -242,6 +242,7 @@ XEN_CPUFEATURE(IBPB,          8*32+12) /*A  IBPB support only (no IBRS, used by
+ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
+ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions */
+ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
++XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*A  VERW clears microarchitectural buffers */
+ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
+ XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
+ XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */

Added: head/emulators/xen-kernel/files/xsa297-4.12-6.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/emulators/xen-kernel/files/xsa297-4.12-6.patch	Wed May 15 10:24:57 2019	(r501706)
@@ -0,0 +1,134 @@
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers
+
+Three synthetic features are introduced, as we need individual control of
+each, depending on circumstances.  A later change will enable them at
+appropriate points.
+
+The verw_sel field doesn't strictly need to live in struct cpu_info.  It lives
+there because there is a convenient hole it can fill, and it reduces the
+complexity of the SPEC_CTRL_EXIT_TO_{PV,HVM} assembly by avoiding the need for
+any temporary stack maintenance.
+
+This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
+index 052228c..33930ce 100644
+--- a/xen/arch/x86/x86_64/asm-offsets.c
++++ b/xen/arch/x86/x86_64/asm-offsets.c
+@@ -110,6 +110,7 @@ void __dummy__(void)
+     BLANK();
+ 
+     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
++    OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
+     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+     OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
+     OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
+diff --git a/xen/include/asm-x86/cpufeatures.h b/xen/include/asm-x86/cpufeatures.h
+index 0c06274..ba55245 100644
+--- a/xen/include/asm-x86/cpufeatures.h
++++ b/xen/include/asm-x86/cpufeatures.h
+@@ -31,3 +31,6 @@ XEN_CPUFEATURE(SC_RSB_PV,       (FSCAPINTS+0)*32+18) /* RSB overwrite needed for
+ XEN_CPUFEATURE(SC_RSB_HVM,      (FSCAPINTS+0)*32+19) /* RSB overwrite needed for HVM */
+ XEN_CPUFEATURE(SC_MSR_IDLE,     (FSCAPINTS+0)*32+21) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
+ XEN_CPUFEATURE(XEN_LBR,         (FSCAPINTS+0)*32+22) /* Xen uses MSR_DEBUGCTL.LBR */
++XEN_CPUFEATURE(SC_VERW_PV,      (FSCAPINTS+0)*32+23) /* VERW used by Xen for PV */
++XEN_CPUFEATURE(SC_VERW_HVM,     (FSCAPINTS+0)*32+24) /* VERW used by Xen for HVM */
++XEN_CPUFEATURE(SC_VERW_IDLE,    (FSCAPINTS+0)*32+25) /* VERW used by Xen for idle */
+diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
+index 5bd64b2..f3508c3 100644
+--- a/xen/include/asm-x86/current.h
++++ b/xen/include/asm-x86/current.h
+@@ -38,6 +38,7 @@ struct vcpu;
+ struct cpu_info {
+     struct cpu_user_regs guest_cpu_user_regs;
+     unsigned int processor_id;
++    unsigned int verw_sel;
+     struct vcpu *current_vcpu;
+     unsigned long per_cpu_offset;
+     unsigned long cr4;
+diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
+index 20ee112..ba03bb4 100644
+--- a/xen/include/asm-x86/spec_ctrl.h
++++ b/xen/include/asm-x86/spec_ctrl.h
+@@ -60,6 +60,13 @@ static inline void init_shadow_spec_ctrl_state(void)
+     info->shadow_spec_ctrl = 0;
+     info->xen_spec_ctrl = default_xen_spec_ctrl;
+     info->spec_ctrl_flags = default_spec_ctrl_flags;
++
++    /*
++     * For least latency, the VERW selector should be a writeable data
++     * descriptor resident in the cache.  __HYPERVISOR_DS32 shares a cache
++     * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
++     */
++    info->verw_sel = __HYPERVISOR_DS32;
+ }
+ 
+ /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
+@@ -80,6 +87,22 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+     alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
+                       "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
+     barrier();
++
++    /*
++     * Microarchitectural Store Buffer Data Sampling:
++     *
++     * On vulnerable systems, store buffer entries are statically partitioned
++     * between active threads.  When entering idle, our store buffer entries
++     * are re-partitioned to allow the other threads to use them.
++     *
++     * Flush the buffers to ensure that no sensitive data of ours can be
++     * leaked by a sibling after it gets our store buffer entries.
++     *
++     * Note: VERW must be encoded with a memory operand, as it is only that
++     * form which causes a flush.
++     */
++    alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201905151024.x4FAOvbq015331>