Mirror of https://git.FreeBSD.org/ports.git (synced 2024-11-19 00:13:33 +00:00)
emulators/xen-kernel: import fix for cache attributes
Import fixes for cache attributes of grant and foreign maps on Intel. This
should result in an I/O performance improvement when using FreeBSD as a dom0
on Intel hardware.

Approved by: bapt (implicit)

parent d0e59dedfb
commit fa0f04a80f

emulators/xen-kernel/Makefile
@@ -1,6 +1,6 @@
PORTNAME= xen
PORTVERSION= 4.15.0
-PORTREVISION= 0
+PORTREVISION= 1
CATEGORIES= emulators
MASTER_SITES= http://downloads.xenproject.org/release/xen/${PORTVERSION}/
PKGNAMESUFFIX= -kernel
@@ -23,6 +23,11 @@ STRIP= #
PLIST_FILES= /boot/xen \
		lib/debug/boot/xen.debug

+# Fix grant/foreign mapping cache attributes on Intel.
+EXTRA_PATCHES+= ${PATCHDIR}/0001-x86-mtrr-remove-stale-function-prototype.patch:-p1 \
+		${PATCHDIR}/0002-x86-mtrr-move-epte_get_entry_emt-to-p2m-ept.c.patch:-p1 \
+		${PATCHDIR}/0003-x86-ept-force-WB-cache-attributes-for-grant-and-fore.patch:-p1
+
.include <bsd.port.options.mk>

.if ${OPSYS} != FreeBSD

emulators/xen-kernel/files/0001-x86-mtrr-remove-stale-function-prototype.patch (new file)
@@ -0,0 +1,31 @@
From c8aaa97f84170192b05b3020a55c69f71d84629f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Mon, 31 May 2021 12:47:12 +0200
Subject: [PATCH 1/3] x86/mtrr: remove stale function prototype
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes: 1c84d04673 ('VMX: remove the problematic set_uc_mode logic')
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
xen/include/asm-x86/mtrr.h | 2 --
1 file changed, 2 deletions(-)

diff --git a/xen/include/asm-x86/mtrr.h b/xen/include/asm-x86/mtrr.h
index 4be704cb6a..24e5de5c22 100644
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -78,8 +78,6 @@ extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
extern int epte_get_entry_emt(struct domain *, unsigned long gfn, mfn_t mfn,
unsigned int order, uint8_t *ipat,
bool_t direct_mmio);
-extern void ept_change_entry_emt_with_range(
- struct domain *d, unsigned long start_gfn, unsigned long end_gfn);
extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
extern int hold_mtrr_updates_on_aps;
extern void mtrr_aps_sync_begin(void);
--
2.31.1

emulators/xen-kernel/files/0002-x86-mtrr-move-epte_get_entry_emt-to-p2m-ept.c.patch (new file)
@@ -0,0 +1,377 @@
From bad7fc7a20452f5ba5e2aaf0019affec7fa87271 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Thu, 17 Jun 2021 17:58:11 +0200
Subject: [PATCH 2/3] x86/mtrr: move epte_get_entry_emt to p2m-ept.c
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is an EPT specific function, so it shouldn't live in the generic
mtrr file. Such movement is also needed for future work that will
require passing a p2m_type_t parameter to epte_get_entry_emt, and
making that type visible to the mtrr users is cumbersome and
unneeded.

Moving epte_get_entry_emt out of mtrr.c requires making the helper to
get the MTRR type of an address from the mtrr state public. While
there rename the function to start with the mtrr prefix, like other
mtrr related functions.

While there fix some of the types of the function parameters.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
xen/arch/x86/hvm/mtrr.c | 109 +---------------------------
xen/arch/x86/mm/p2m-ept.c | 115 ++++++++++++++++++++++++++++--
xen/include/asm-x86/hvm/vmx/vmx.h | 2 +
xen/include/asm-x86/mtrr.h | 5 +-
4 files changed, 117 insertions(+), 114 deletions(-)

diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index fb051d59c3..4a9f3177ed 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -194,8 +194,7 @@ void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
* May return a negative value when order > 0, indicating to the caller
* that the respective mapping needs splitting.
*/
-static int get_mtrr_type(const struct mtrr_state *m,
- paddr_t pa, unsigned int order)
+int mtrr_get_type(const struct mtrr_state *m, paddr_t pa, unsigned int order)
{
uint8_t overlap_mtrr = 0;
uint8_t overlap_mtrr_pos = 0;
@@ -323,7 +322,7 @@ static uint8_t effective_mm_type(struct mtrr_state *m,
* just use it
*/
if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
- mtrr_mtype = get_mtrr_type(m, gpa, 0);
+ mtrr_mtype = mtrr_get_type(m, gpa, 0);
else
mtrr_mtype = gmtrr_mtype;

@@ -350,7 +349,7 @@ uint32_t get_pat_flags(struct vcpu *v,
guest_eff_mm_type = effective_mm_type(g, pat, gpaddr,
gl1e_flags, gmtrr_mtype);
/* 2. Get the memory type of host physical address, with MTRR */
- shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr, 0);
+ shadow_mtrr_type = mtrr_get_type(&mtrr_state, spaddr, 0);

/* 3. Find the memory type in PAT, with host MTRR memory type
* and guest effective memory type.
@@ -789,108 +788,6 @@ void memory_type_changed(struct domain *d)
}
}

-int epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
- unsigned int order, uint8_t *ipat, bool_t direct_mmio)
-{
- int gmtrr_mtype, hmtrr_mtype;
- struct vcpu *v = current;
- unsigned long i;
-
- *ipat = 0;
-
- if ( v->domain != d )
- v = d->vcpu ? d->vcpu[0] : NULL;
-
- /* Mask, not add, for order so it works with INVALID_MFN on unmapping */
- if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
- mfn_x(mfn) | ((1UL << order) - 1)) )
- {
- if ( !order || rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
- mfn_x(mfn) | ((1UL << order) - 1)) )
- {
- *ipat = 1;
- return MTRR_TYPE_UNCACHABLE;
- }
- /* Force invalid memory type so resolve_misconfig() will split it */
- return -1;
- }
-
- if ( !mfn_valid(mfn) )
- {
- *ipat = 1;
- return MTRR_TYPE_UNCACHABLE;
- }
-
- if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
- {
- *ipat = 1;
- return MTRR_TYPE_WRBACK;
- }
-
- for ( i = 0; i < (1ul << order); i++ )
- {
- if ( is_special_page(mfn_to_page(mfn_add(mfn, i))) )
- {
- if ( order )
- return -1;
- *ipat = 1;
- return MTRR_TYPE_WRBACK;
- }
- }
-
- if ( direct_mmio )
- return MTRR_TYPE_UNCACHABLE;
-
- gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, _gfn(gfn), order);
- if ( gmtrr_mtype >= 0 )
- {
- *ipat = 1;
- return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
- : MTRR_TYPE_UNCACHABLE;
- }
- if ( gmtrr_mtype == -EADDRNOTAVAIL )
- return -1;
-
- gmtrr_mtype = is_hvm_domain(d) && v ?
- get_mtrr_type(&v->arch.hvm.mtrr,
- gfn << PAGE_SHIFT, order) :
- MTRR_TYPE_WRBACK;
- hmtrr_mtype = get_mtrr_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT, order);
- if ( gmtrr_mtype < 0 || hmtrr_mtype < 0 )
- return -1;
-
- /* If both types match we're fine. */
- if ( likely(gmtrr_mtype == hmtrr_mtype) )
- return hmtrr_mtype;
-
- /* If either type is UC, we have to go with that one. */
- if ( gmtrr_mtype == MTRR_TYPE_UNCACHABLE ||
- hmtrr_mtype == MTRR_TYPE_UNCACHABLE )
- return MTRR_TYPE_UNCACHABLE;
-
- /* If either type is WB, we have to go with the other one. */
- if ( gmtrr_mtype == MTRR_TYPE_WRBACK )
- return hmtrr_mtype;
- if ( hmtrr_mtype == MTRR_TYPE_WRBACK )
- return gmtrr_mtype;
-
- /*
- * At this point we have disagreeing WC, WT, or WP types. The only
- * combination that can be cleanly resolved is WT:WP. The ones involving
- * WC need to be converted to UC, both due to the memory ordering
- * differences and because WC disallows reads to be cached (WT and WP
- * permit this), while WT and WP require writes to go straight to memory
- * (WC can buffer them).
- */
- if ( (gmtrr_mtype == MTRR_TYPE_WRTHROUGH &&
- hmtrr_mtype == MTRR_TYPE_WRPROT) ||
- (gmtrr_mtype == MTRR_TYPE_WRPROT &&
- hmtrr_mtype == MTRR_TYPE_WRTHROUGH) )
- return MTRR_TYPE_WRPROT;
-
- return MTRR_TYPE_UNCACHABLE;
-}
-
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 23d411f01d..542fe5ef34 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -20,6 +20,7 @@
#include <public/hvm/dm_op.h>
#include <asm/altp2m.h>
#include <asm/current.h>
+#include <asm/iocap.h>
#include <asm/paging.h>
#include <asm/types.h>
#include <asm/domain.h>
@@ -485,6 +486,109 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
return rc;
}

+int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int order, bool *ipat, bool direct_mmio)
+{
+ int gmtrr_mtype, hmtrr_mtype;
+ struct vcpu *v = current;
+ unsigned long i;
+
+ *ipat = false;
+
+ if ( v->domain != d )
+ v = d->vcpu ? d->vcpu[0] : NULL;
+
+ /* Mask, not add, for order so it works with INVALID_MFN on unmapping */
+ if ( rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn),
+ mfn_x(mfn) | ((1UL << order) - 1)) )
+ {
+ if ( !order || rangeset_contains_range(mmio_ro_ranges, mfn_x(mfn),
+ mfn_x(mfn) | ((1UL << order) - 1)) )
+ {
+ *ipat = true;
+ return MTRR_TYPE_UNCACHABLE;
+ }
+ /* Force invalid memory type so resolve_misconfig() will split it */
+ return -1;
+ }
+
+ if ( !mfn_valid(mfn) )
+ {
+ *ipat = true;
+ return MTRR_TYPE_UNCACHABLE;
+ }
+
+ if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
+ {
+ *ipat = true;
+ return MTRR_TYPE_WRBACK;
+ }
+
+ for ( i = 0; i < (1ul << order); i++ )
+ {
+ if ( is_special_page(mfn_to_page(mfn_add(mfn, i))) )
+ {
+ if ( order )
+ return -1;
+ *ipat = true;
+ return MTRR_TYPE_WRBACK;
+ }
+ }
+
+ if ( direct_mmio )
+ return MTRR_TYPE_UNCACHABLE;
+
+ gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
+ if ( gmtrr_mtype >= 0 )
+ {
+ *ipat = true;
+ return gmtrr_mtype != PAT_TYPE_UC_MINUS ? gmtrr_mtype
+ : MTRR_TYPE_UNCACHABLE;
+ }
+ if ( gmtrr_mtype == -EADDRNOTAVAIL )
+ return -1;
+
+ gmtrr_mtype = is_hvm_domain(d) && v ?
+ mtrr_get_type(&v->arch.hvm.mtrr,
+ gfn << PAGE_SHIFT, order) :
+ MTRR_TYPE_WRBACK;
+ hmtrr_mtype = mtrr_get_type(&mtrr_state, mfn_x(mfn) << PAGE_SHIFT,
+ order);
+ if ( gmtrr_mtype < 0 || hmtrr_mtype < 0 )
+ return -1;
+
+ /* If both types match we're fine. */
+ if ( likely(gmtrr_mtype == hmtrr_mtype) )
+ return hmtrr_mtype;
+
+ /* If either type is UC, we have to go with that one. */
+ if ( gmtrr_mtype == MTRR_TYPE_UNCACHABLE ||
+ hmtrr_mtype == MTRR_TYPE_UNCACHABLE )
+ return MTRR_TYPE_UNCACHABLE;
+
+ /* If either type is WB, we have to go with the other one. */
+ if ( gmtrr_mtype == MTRR_TYPE_WRBACK )
+ return hmtrr_mtype;
+ if ( hmtrr_mtype == MTRR_TYPE_WRBACK )
+ return gmtrr_mtype;
+
+ /*
+ * At this point we have disagreeing WC, WT, or WP types. The only
+ * combination that can be cleanly resolved is WT:WP. The ones involving
+ * WC need to be converted to UC, both due to the memory ordering
+ * differences and because WC disallows reads to be cached (WT and WP
+ * permit this), while WT and WP require writes to go straight to memory
+ * (WC can buffer them).
+ */
+ if ( (gmtrr_mtype == MTRR_TYPE_WRTHROUGH &&
+ hmtrr_mtype == MTRR_TYPE_WRPROT) ||
+ (gmtrr_mtype == MTRR_TYPE_WRPROT &&
+ hmtrr_mtype == MTRR_TYPE_WRTHROUGH) )
+ return MTRR_TYPE_WRPROT;
+
+ return MTRR_TYPE_UNCACHABLE;
+}
+
/*
* Resolve deliberately mis-configured (EMT field set to an invalid value)
* entries in the page table hierarchy for the given GFN:
@@ -519,7 +623,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)

if ( level == 0 || is_epte_superpage(&e) )
{
- uint8_t ipat = 0;
+ bool ipat;

if ( e.emt != MTRR_NUM_TYPES )
break;
@@ -535,7 +639,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
e.emt = 0;
if ( !is_epte_valid(&e) || !is_epte_present(&e) )
continue;
- e.emt = epte_get_entry_emt(p2m->domain, gfn + i,
+ e.emt = epte_get_entry_emt(p2m->domain, _gfn(gfn + i),
_mfn(e.mfn), 0, &ipat,
e.sa_p2mt == p2m_mmio_direct);
e.ipat = ipat;
@@ -553,7 +657,8 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
}
else
{
- int emt = epte_get_entry_emt(p2m->domain, gfn, _mfn(e.mfn),
+ int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn),
+ _mfn(e.mfn),
level * EPT_TABLE_ORDER, &ipat,
e.sa_p2mt == p2m_mmio_direct);
bool_t recalc = e.recalc;
@@ -678,7 +783,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
int ret, rc = 0;
bool_t entry_written = 0;
bool_t direct_mmio = (p2mt == p2m_mmio_direct);
- uint8_t ipat = 0;
+ bool ipat = false;
bool_t need_modify_vtd_table = 1;
bool_t vtd_pte_present = 0;
unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, mfn);
@@ -790,7 +895,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,

if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
{
- int emt = epte_get_entry_emt(p2m->domain, gfn, mfn,
+ int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn), mfn,
i * EPT_TABLE_ORDER, &ipat, direct_mmio);

if ( emt >= 0 )
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 534e9fc221..f668ee1f09 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -599,6 +599,8 @@ void ept_p2m_uninit(struct p2m_domain *p2m);

void ept_walk_table(struct domain *d, unsigned long gfn);
bool_t ept_handle_misconfig(uint64_t gpa);
+int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int order, bool *ipat, bool direct_mmio);
void setup_ept_dump(void);
void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
/* Locate an alternate p2m by its EPTP */
diff --git a/xen/include/asm-x86/mtrr.h b/xen/include/asm-x86/mtrr.h
index 24e5de5c22..e0fd1005ce 100644
--- a/xen/include/asm-x86/mtrr.h
+++ b/xen/include/asm-x86/mtrr.h
@@ -72,12 +72,11 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
+extern int mtrr_get_type(const struct mtrr_state *m, paddr_t pa,
+ unsigned int order);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
paddr_t spaddr, uint8_t gmtrr_mtype);
-extern int epte_get_entry_emt(struct domain *, unsigned long gfn, mfn_t mfn,
- unsigned int order, uint8_t *ipat,
- bool_t direct_mmio);
extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
extern int hold_mtrr_updates_on_aps;
extern void mtrr_aps_sync_begin(void);
--
2.31.1

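The guest/host MTRR type combination performed at the tail of epte_get_entry_emt() (moved verbatim by the patch above) is easier to follow in isolation. The following is a minimal standalone C sketch of just that combination step; the enum values mirror the architectural MTRR encodings, but combine_mtrr_types() is a made-up name and the program is an illustration of the rules, not code from Xen or from the port.

#include <stdio.h>

/* Stand-in MTRR memory type encodings (matching the architectural values). */
enum mtrr_type {
    MTRR_UC = 0, /* uncacheable */
    MTRR_WC = 1, /* write-combining */
    MTRR_WT = 4, /* write-through */
    MTRR_WP = 5, /* write-protect */
    MTRR_WB = 6, /* write-back */
};

/*
 * Combine a guest MTRR type and a host MTRR type the way the tail of
 * epte_get_entry_emt() does:
 *  - equal types: use that type;
 *  - either side UC: the result is UC;
 *  - either side WB: use the other side;
 *  - WT vs WP (either order): WP;
 *  - any remaining disagreement involves WC and degrades to UC.
 */
static enum mtrr_type combine_mtrr_types(enum mtrr_type guest, enum mtrr_type host)
{
    if (guest == host)
        return host;
    if (guest == MTRR_UC || host == MTRR_UC)
        return MTRR_UC;
    if (guest == MTRR_WB)
        return host;
    if (host == MTRR_WB)
        return guest;
    if ((guest == MTRR_WT && host == MTRR_WP) ||
        (guest == MTRR_WP && host == MTRR_WT))
        return MTRR_WP;
    return MTRR_UC;
}

int main(void)
{
    /* A few combinations showing the degradation rules. */
    printf("WB vs WT -> %d (WT)\n", combine_mtrr_types(MTRR_WB, MTRR_WT));
    printf("WT vs WP -> %d (WP)\n", combine_mtrr_types(MTRR_WT, MTRR_WP));
    printf("WC vs WT -> %d (UC)\n", combine_mtrr_types(MTRR_WC, MTRR_WT));
    printf("UC vs WB -> %d (UC)\n", combine_mtrr_types(MTRR_UC, MTRR_WB));
    return 0;
}

Compiled on its own, the sketch shows WT paired with WP resolving to WP, while any disagreement involving WC degrades to UC, exactly as the comment in the moved function describes.
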
emulators/xen-kernel/files/0003-x86-ept-force-WB-cache-attributes-for-grant-and-fore.patch (new file)
@@ -0,0 +1,139 @@
From 8ce6832518035a17e2d89a98235359f3d551f2c1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Thu, 17 Jun 2021 18:00:57 +0200
Subject: [PATCH 3/3] x86/ept: force WB cache attributes for grant and foreign
 maps
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Force WB type for grants and foreign pages. Those are usually mapped
over unpopulated physical ranges in the p2m, and those ranges would
usually be UC in the MTRR state, which is unlikely to be the correct
cache attribute. It's also cumbersome (or even impossible) for the
guest to be setting the MTRR type for all those mappings as WB, as
MTRR ranges are finite.

Note that this is not an issue on AMD because WB cache attribute is
already set on grants and foreign mappings in the p2m and MTRR types
are ignored. Also on AMD Xen cannot force a cache attribute because of
the lack of ignore PAT equivalent, so the behavior here slightly
diverges between AMD and Intel (or EPT vs NPT/shadow).

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
xen/arch/x86/mm/p2m-ept.c | 38 +++++++++++++++++++++++++------
xen/include/asm-x86/hvm/vmx/vmx.h | 2 +-
2 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 542fe5ef34..41ab3c199a 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -487,7 +487,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
}

int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
- unsigned int order, bool *ipat, bool direct_mmio)
+ unsigned int order, bool *ipat, p2m_type_t type)
{
int gmtrr_mtype, hmtrr_mtype;
struct vcpu *v = current;
@@ -518,7 +518,8 @@ int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
return MTRR_TYPE_UNCACHABLE;
}

- if ( !direct_mmio && !is_iommu_enabled(d) && !cache_flush_permitted(d) )
+ if ( type != p2m_mmio_direct && !is_iommu_enabled(d) &&
+ !cache_flush_permitted(d) )
{
*ipat = true;
return MTRR_TYPE_WRBACK;
@@ -535,9 +536,33 @@ int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
}
}

- if ( direct_mmio )
+ switch ( type )
+ {
+ case p2m_mmio_direct:
return MTRR_TYPE_UNCACHABLE;

+ case p2m_grant_map_ro:
+ case p2m_grant_map_rw:
+ case p2m_map_foreign:
+ /*
+ * Force WB type for grants and foreign pages. Those are usually mapped
+ * over unpopulated physical ranges in the p2m, and those would usually
+ * be UC in the MTRR state, which is unlikely to be the correct cache
+ * attribute. It's also cumbersome (or even impossible) for the guest
+ * to be setting the MTRR type for all those mappings as WB, as MTRR
+ * ranges are finite.
+ *
+ * Note that on AMD we cannot force a cache attribute because of the
+ * lack of ignore PAT equivalent, so the behavior here slightly
+ * diverges. See p2m_type_to_flags for the AMD attributes.
+ */
+ *ipat = true;
+ return MTRR_TYPE_WRBACK;
+
+ default:
+ break;
+ }
+
gmtrr_mtype = hvm_get_mem_pinned_cacheattr(d, gfn, order);
if ( gmtrr_mtype >= 0 )
{
@@ -641,7 +666,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
continue;
e.emt = epte_get_entry_emt(p2m->domain, _gfn(gfn + i),
_mfn(e.mfn), 0, &ipat,
- e.sa_p2mt == p2m_mmio_direct);
+ e.sa_p2mt);
e.ipat = ipat;

nt = p2m_recalc_type(e.recalc, e.sa_p2mt, p2m, gfn + i);
@@ -660,7 +685,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn),
_mfn(e.mfn),
level * EPT_TABLE_ORDER, &ipat,
- e.sa_p2mt == p2m_mmio_direct);
+ e.sa_p2mt);
bool_t recalc = e.recalc;

if ( recalc && p2m_is_changeable(e.sa_p2mt) )
@@ -782,7 +807,6 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn;
int ret, rc = 0;
bool_t entry_written = 0;
- bool_t direct_mmio = (p2mt == p2m_mmio_direct);
bool ipat = false;
bool_t need_modify_vtd_table = 1;
bool_t vtd_pte_present = 0;
@@ -896,7 +920,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
{
int emt = epte_get_entry_emt(p2m->domain, _gfn(gfn), mfn,
- i * EPT_TABLE_ORDER, &ipat, direct_mmio);
+ i * EPT_TABLE_ORDER, &ipat, p2mt);

if ( emt >= 0 )
new_entry.emt = emt;
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index f668ee1f09..0deb507490 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -600,7 +600,7 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
void ept_walk_table(struct domain *d, unsigned long gfn);
bool_t ept_handle_misconfig(uint64_t gpa);
int epte_get_entry_emt(struct domain *d, gfn_t gfn, mfn_t mfn,
- unsigned int order, bool *ipat, bool direct_mmio);
+ unsigned int order, bool *ipat, p2m_type_t type);
void setup_ept_dump(void);
void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
/* Locate an alternate p2m by its EPTP */
--
2.31.1

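For reference, the behavioural change introduced by this third patch can be summarised outside the Xen tree. The sketch below is a simplified model, not Xen code: the p2m_type and MTRR constants are local stand-ins and entry_emt_for_type() is a made-up helper. It shows the new dispatch on the p2m type: direct MMIO stays uncacheable, grant and foreign mappings are forced to write-back with the ignore-PAT flag set, and every other type falls through to the usual guest/host MTRR lookup.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types; the real code uses Xen's p2m_type_t and MTRR constants. */
enum p2m_type { p2m_ram_rw, p2m_mmio_direct, p2m_grant_map_ro,
                p2m_grant_map_rw, p2m_map_foreign };
enum mtrr_type { MTRR_UC = 0, MTRR_WB = 6 };

/*
 * Models the switch added by patch 3: direct MMIO stays UC, grant and
 * foreign mappings are forced to WB with the "ignore PAT" bit set, and
 * every other type falls through to the usual guest/host MTRR lookup
 * (represented here by a caller-supplied fallback value).
 */
static int entry_emt_for_type(enum p2m_type type, bool *ipat, int mtrr_fallback)
{
    *ipat = false;

    switch (type) {
    case p2m_mmio_direct:
        return MTRR_UC;

    case p2m_grant_map_ro:
    case p2m_grant_map_rw:
    case p2m_map_foreign:
        /* Grant/foreign pages: force WB regardless of the MTRR state. */
        *ipat = true;
        return MTRR_WB;

    default:
        return mtrr_fallback;
    }
}

int main(void)
{
    bool ipat;
    int emt = entry_emt_for_type(p2m_grant_map_rw, &ipat, MTRR_UC);
    printf("grant map: emt=%d ipat=%d\n", emt, (int)ipat);    /* WB, ipat set */
    emt = entry_emt_for_type(p2m_ram_rw, &ipat, MTRR_WB);
    printf("ordinary RAM: emt=%d ipat=%d\n", emt, (int)ipat); /* falls back */
    return 0;
}

This is the mechanism behind the I/O improvement mentioned in the commit message: on Intel/EPT, dom0 backend mappings of guest pages (grant and foreign maps) no longer inherit an uncacheable type from unpopulated MTRR ranges.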