
xen: update to 4.7.2

Apply build fixes for clang 4.0 (picked from upstream) and XSA-211.

Reviewed by:		bapt
MFH:			2017Q1
Sponsored by:		AsiaBSDCon
Differential revision:	https://reviews.freebsd.org/D9926
Roger Pau Monné 2017-03-20 10:42:20 +00:00
parent a08d5ac3be
commit 6a777074d5
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=436536
21 changed files with 500 additions and 999 deletions


@@ -2,8 +2,8 @@
PORTNAME= xen
PKGNAMESUFFIX= -kernel
PORTVERSION= 4.7.1
PORTREVISION= 3
PORTVERSION= 4.7.2
PORTREVISION= 0
CATEGORIES= emulators
MASTER_SITES= http://downloads.xenproject.org/release/xen/${PORTVERSION}/
@@ -40,14 +40,8 @@ PLIST_FILES= /boot/xen \
EXTRA_PATCHES= ${FILESDIR}/0001-xen-logdirty-prevent-preemption-if-finished.patch:-p1 \
${FILESDIR}/0002-xen-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch:-p1 \
${FILESDIR}/kconf_arch.patch:-p1 \
${FILESDIR}/xsa191.patch:-p1 \
${FILESDIR}/xsa192.patch:-p1 \
${FILESDIR}/xsa193-4.7.patch:-p1 \
${FILESDIR}/xsa194.patch:-p1 \
${FILESDIR}/xsa195.patch:-p1 \
${FILESDIR}/xsa200-4.7.patch:-p1 \
${FILESDIR}/xsa204-4.7.patch:-p1 \
${FILESDIR}/xsa202.patch:-p1
${FILESDIR}/0001-x86-drop-unneeded-__packed-attributes.patch:-p1 \
${FILESDIR}/0002-build-clang-fix-XSM-dummy-policy-when-using-clang-4..patch:-p1
.include <bsd.port.options.mk>


@@ -1,3 +1,3 @@
TIMESTAMP = 1480690512
SHA256 (xen-4.7.1.tar.gz) = e87f4b0575e78657ee23d31470a15ecf1ce8c3a92a771cda46bbcd4d0d671ffe
SIZE (xen-4.7.1.tar.gz) = 20706864
TIMESTAMP = 1489059095
SHA256 (xen-4.7.2.tar.gz) = 61494a56d9251e2108080f95b0dc8e3d175f1ba4da34603fc07b91cfebf358d5
SIZE (xen-4.7.2.tar.gz) = 20714281


@@ -0,0 +1,122 @@
From 7de7d07d47cc389bc341f5524ea8415c3c78e378 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 10 Mar 2017 01:05:51 +0900
Subject: [PATCH 1/2] x86: drop unneeded __packed attributes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
There were a couple of unneeded packed attributes in several x86-specific
structures that are obviously aligned. The only non-trivial one is
vmcb_struct, which has been checked with pahole to have the same layout with
and without the packed attribute. In that case add a build-time size check to
be on the safe side.
No functional change is expected as a result of this commit.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
xen/arch/x86/hvm/svm/vmcb.c | 3 +++
xen/arch/x86/x86_emulate/x86_emulate.h | 4 ++--
xen/include/asm-x86/hvm/svm/vmcb.h | 12 ++++++------
3 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ea014f..f982fc9 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -72,6 +72,9 @@ static int construct_vmcb(struct vcpu *v)
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct vmcb_struct *vmcb = arch_svm->vmcb;
+ /* Build-time check of the size of VMCB AMD structure. */
+ BUILD_BUG_ON(sizeof(*vmcb) != PAGE_SIZE);
+
vmcb->_general1_intercepts =
GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT |
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 17c86f3..bbdf16a 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -71,7 +71,7 @@ enum x86_swint_emulation {
* Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
* segment descriptor. It happens to match the format of an AMD SVM VMCB.
*/
-typedef union __packed segment_attributes {
+typedef union segment_attributes {
uint16_t bytes;
struct
{
@@ -91,7 +91,7 @@ typedef union __packed segment_attributes {
* Full state of a segment register (visible and hidden portions).
* Again, this happens to match the format of an AMD SVM VMCB.
*/
-struct __packed segment_register {
+struct segment_register {
uint16_t sel;
segment_attributes_t attr;
uint32_t limit;
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index bad2382..a3cd1b1 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -308,7 +308,7 @@ enum VMEXIT_EXITCODE
/* Definition of segment state is borrowed by the generic HVM code. */
typedef struct segment_register svm_segment_register_t;
-typedef union __packed
+typedef union
{
u64 bytes;
struct
@@ -322,7 +322,7 @@ typedef union __packed
} fields;
} eventinj_t;
-typedef union __packed
+typedef union
{
u64 bytes;
struct
@@ -340,7 +340,7 @@ typedef union __packed
} fields;
} vintr_t;
-typedef union __packed
+typedef union
{
u64 bytes;
struct
@@ -357,7 +357,7 @@ typedef union __packed
} fields;
} ioio_info_t;
-typedef union __packed
+typedef union
{
u64 bytes;
struct
@@ -366,7 +366,7 @@ typedef union __packed
} fields;
} lbrctrl_t;
-typedef union __packed
+typedef union
{
uint32_t bytes;
struct
@@ -401,7 +401,7 @@ typedef union __packed
#define IOPM_SIZE (12 * 1024)
#define MSRPM_SIZE (8 * 1024)
-struct __packed vmcb_struct {
+struct vmcb_struct {
u32 _cr_intercepts; /* offset 0x00 - cleanbit 0 */
u32 _dr_intercepts; /* offset 0x04 - cleanbit 0 */
u32 _exception_intercepts; /* offset 0x08 - cleanbit 0 */
--
2.10.1 (Apple Git-78)
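
The BUILD_BUG_ON(sizeof(*vmcb) != PAGE_SIZE) added above makes the size assumption permanent: if dropping __packed ever changed the overall size of vmcb_struct, the build would fail instead of silently producing a mis-sized VMCB. A minimal standalone sketch of the same mechanism, using C11 _Static_assert and a made-up structure in place of Xen's vmcb_struct:

#include <stdint.h>

/* Hypothetical stand-in for vmcb_struct; the real layout lives in Xen. */
struct demo_vmcb {
    uint32_t intercepts[3];
    uint8_t  pad[4096 - 3 * sizeof(uint32_t)];
};

/* Equivalent of BUILD_BUG_ON(sizeof(*vmcb) != PAGE_SIZE): compilation
 * fails if the structure ever stops being exactly one 4 KiB page. */
_Static_assert(sizeof(struct demo_vmcb) == 4096,
               "demo_vmcb must be exactly one page");

int main(void) { return 0; }

Xen's own BUILD_BUG_ON achieves the same effect without C11, typically by expanding to a construct (such as a negative-width bitfield) that the compiler rejects when the condition is true.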


@@ -0,0 +1,69 @@
From e5a1bf46187b0d39ec7e8c46e11c42027840747c Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Fri, 10 Mar 2017 01:08:26 +0900
Subject: [PATCH 2/2] build/clang: fix XSM dummy policy when using clang 4.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
There seems to be some weird bug in clang 4.0 that prevents xsm_pmu_op from
working as expected, and vpmu.o ends up with a reference to
__xsm_action_mismatch_detected which makes the build fail:
[...]
ld -melf_x86_64_fbsd -T xen.lds -N prelink.o \
xen/common/symbols-dummy.o -o xen/.xen-syms.0
prelink.o: In function `xsm_default_action':
xen/include/xsm/dummy.h:80: undefined reference to `__xsm_action_mismatch_detected'
xen/xen/include/xsm/dummy.h:80: relocation truncated to fit: R_X86_64_PC32 against undefined symbol `__xsm_action_mismatch_detected'
ld: xen/xen/.xen-syms.0: hidden symbol `__xsm_action_mismatch_detected' isn't defined
Then doing a search in the object files:
# find xen/ -type f -name '*.o' -print0 | xargs -0 bash -c \
'for filename; do nm "$filename" | \
grep -q __xsm_action_mismatch_detected && echo "$filename"; done' bash
xen/arch/x86/prelink.o
xen/arch/x86/cpu/vpmu.o
xen/arch/x86/cpu/built_in.o
xen/arch/x86/built_in.o
The current patch is the only way I've found to fix this so far, by simply
moving the XSM_PRIV check into the default case in xsm_pmu_op. This also fixes
the behavior of do_xenpmu_op, which will now return -EINVAL for unknown
XENPMU_* operations, instead of -EPERM when called by a privileged domain.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
xen/include/xsm/dummy.h | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index abbe282..0039c39 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -711,18 +711,13 @@ static XSM_INLINE int xsm_pmu_op (XSM_DEFAULT_ARG struct domain *d, unsigned int
XSM_ASSERT_ACTION(XSM_OTHER);
switch ( op )
{
- case XENPMU_mode_set:
- case XENPMU_mode_get:
- case XENPMU_feature_set:
- case XENPMU_feature_get:
- return xsm_default_action(XSM_PRIV, d, current->domain);
case XENPMU_init:
case XENPMU_finish:
case XENPMU_lvtpc_set:
case XENPMU_flush:
return xsm_default_action(XSM_HOOK, d, current->domain);
default:
- return -EPERM;
+ return xsm_default_action(XSM_PRIV, d, current->domain);
}
}
--
2.10.1 (Apple Git-78)
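
The behaviour change described above is easiest to see with a small self-contained mock (not Xen code: the op values, error handling and privilege test below are simplified stand-ins) of how moving the XSM_PRIV check into the default case changes what a privileged caller gets back for an unknown op:

#include <errno.h>
#include <stdio.h>

/* Illustrative mock only: op values and the privilege check are made up. */
enum { XENPMU_init = 0, XENPMU_finish = 1, XENPMU_bogus = 99 };
static int caller_is_privileged = 1;

/* XSM hook after the patch: the per-vcpu ops stay XSM_HOOK (allowed for
 * every domain); everything else, including unknown ops, falls into the
 * default case and merely requires a privileged caller (XSM_PRIV). */
static int xsm_pmu_op(unsigned int op)
{
    switch ( op )
    {
    case XENPMU_init:
    case XENPMU_finish:
        return 0;
    default:
        return caller_is_privileged ? 0 : -EPERM;
    }
}

/* Caller modelled on do_xenpmu_op(): its own default now yields -EINVAL. */
static int do_xenpmu_op(unsigned int op)
{
    int rc = xsm_pmu_op(op);

    if ( rc )
        return rc;
    switch ( op )
    {
    case XENPMU_init:
    case XENPMU_finish:
        return 0;
    default:
        return -EINVAL;
    }
}

int main(void)
{
    /* Before the patch a privileged caller got -EPERM straight from XSM. */
    printf("unknown op, privileged caller: %d\n", do_xenpmu_op(XENPMU_bogus));
    return 0;
}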


@@ -1,152 +0,0 @@
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/hvm: Fix the handling of non-present segments
In 32bit, the data segments may be NULL to indicate that the segment is
ineligible for use. In both 32bit and 64bit, the LDT selector may be NULL to
indicate that the entire LDT is ineligible for use. However, nothing in Xen
actually checks for this condition when performing other segmentation
checks. (Note however that limit and writeability checks are correctly
performed).
Neither Intel nor AMD specify the exact behaviour of loading a NULL segment.
Experimentally, AMD zeroes all attributes but leaves the base and limit
unmodified. Intel zeroes the base, sets the limit to 0xfffffff and resets the
attributes to just .G and .D/B.
The use of the segment information in the VMCB/VMCS is equivalent to a native
pipeline interacting with the segment cache. The present bit can therefore
have a subtly different meaning, and it is now cooked to uniformly indicate
whether the segment is usable or not.
GDTR and IDTR don't have access rights like the other segments, but for
consistency, they are treated as being present so no special casing is needed
elsewhere in the segmentation logic.
AMD hardware does not consider the present bit for %cs and %tr, and will
function as if they were present. They are therefore unconditionally set to
present when reading information from the VMCB, to maintain the new meaning of
usability.
Intel hardware has a separate unusable bit in the VMCS segment attributes.
This bit is inverted and stored in the present field, so the hvm code can work
with architecturally-common state.
This is XSA-191.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/hvm/hvm.c | 8 ++++++++
xen/arch/x86/hvm/svm/svm.c | 4 ++++
xen/arch/x86/hvm/vmx/vmx.c | 20 +++++++++++---------
xen/arch/x86/x86_emulate/x86_emulate.c | 4 ++++
4 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 704fd64..deb1783 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2512,6 +2512,10 @@ bool_t hvm_virtual_to_linear_addr(
*/
addr = (uint32_t)(addr + reg->base);
+ /* Segment not valid for use (cooked meaning of .p)? */
+ if ( !reg->attr.fields.p )
+ goto out;
+
switch ( access_type )
{
case hvm_access_read:
@@ -2767,6 +2771,10 @@ static int hvm_load_segment_selector(
hvm_get_segment_register(
v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
+ /* Segment not valid for use (cooked meaning of .p)? */
+ if ( !desctab.attr.fields.p )
+ goto fail;
+
/* Check against descriptor table limit. */
if ( ((sel & 0xfff8) + 7) > desctab.limit )
goto fail;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 16427f6..4cba406 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -627,6 +627,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
{
case x86_seg_cs:
memcpy(reg, &vmcb->cs, sizeof(*reg));
+ reg->attr.fields.p = 1;
reg->attr.fields.g = reg->limit > 0xFFFFF;
break;
case x86_seg_ds:
@@ -660,13 +661,16 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
case x86_seg_tr:
svm_sync_vmcb(v);
memcpy(reg, &vmcb->tr, sizeof(*reg));
+ reg->attr.fields.p = 1;
reg->attr.fields.type |= 0x2;
break;
case x86_seg_gdtr:
memcpy(reg, &vmcb->gdtr, sizeof(*reg));
+ reg->attr.bytes = 0x80;
break;
case x86_seg_idtr:
memcpy(reg, &vmcb->idtr, sizeof(*reg));
+ reg->attr.bytes = 0x80;
break;
case x86_seg_ldtr:
svm_sync_vmcb(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9a8f694..a652c52 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1035,10 +1035,12 @@ void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
reg->sel = sel;
reg->limit = limit;
- reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
- /* Unusable flag is folded into Present flag. */
- if ( attr & (1u<<16) )
- reg->attr.fields.p = 0;
+ /*
+ * Fold VT-x representation into Xen's representation. The Present bit is
+ * unconditionally set to the inverse of unusable.
+ */
+ reg->attr.bytes =
+ (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
/* Adjust for virtual 8086 mode */
if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr
@@ -1118,11 +1120,11 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
}
}
- attr = ((attr & 0xf00) << 4) | (attr & 0xff);
-
- /* Not-present must mean unusable. */
- if ( !reg->attr.fields.p )
- attr |= (1u << 16);
+ /*
+ * Unfold Xen representation into VT-x representation. The unusable bit
+ * is unconditionally set to the inverse of present.
+ */
+ attr = (!(attr & (1u << 7)) << 16) | ((attr & 0xf00) << 4) | (attr & 0xff);
/* VMX has strict consistency requirement for flag G. */
attr |= !!(limit >> 20) << 15;
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 7a707dc..7cb6f98 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1367,6 +1367,10 @@ protmode_load_seg(
&desctab, ctxt)) )
return rc;
+ /* Segment not valid for use (cooked meaning of .p)? */
+ if ( !desctab.attr.fields.p )
+ goto raise_exn;
+
/* Check against descriptor table limit. */
if ( ((sel & 0xfff8) + 7) > desctab.limit )
goto raise_exn;
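
The "cooked" present bit described above reduces to a pair of bit-shuffling expressions, visible in the vmx_get/set_segment_register hunks. A standalone sketch of that fold/unfold (the two expressions mirror the patch; the helper names and test values are mine):

#include <stdint.h>
#include <stdio.h>

/* Fold VT-x access rights into the VMCB-style layout Xen uses internally:
 * the low byte keeps type/S/DPL, bits 12-15 move down to 8-11, and .p
 * (bit 7) becomes "usable", i.e. the inverse of the VT-x unusable bit 16. */
static uint32_t fold(uint32_t vmx_ar)
{
    return (!(vmx_ar & (1u << 16)) << 7) | (vmx_ar & 0x7f) | ((vmx_ar >> 4) & 0xf00);
}

/* Unfold back into the VT-x layout: unusable is the inverse of .p. */
static uint32_t unfold(uint32_t attr)
{
    return (!(attr & (1u << 7)) << 16) | ((attr & 0xf00) << 4) | (attr & 0xff);
}

int main(void)
{
    uint32_t unusable = 1u << 16;   /* VT-x "segment unusable" */
    uint32_t code_seg = 0xc09b;     /* a typical present 32-bit code segment */

    printf("unusable -> .p = %u\n", (fold(unusable) >> 7) & 1);   /* 0 */
    printf("usable   -> .p = %u\n", (fold(code_seg) >> 7) & 1);   /* 1 */
    printf("round trip keeps unusable: %u\n",
           (unfold(fold(unusable)) >> 16) & 1);                   /* 1 */
    return 0;
}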


@@ -1,64 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch
Just like TR, LDTR is purely a protected mode facility and hence needs
to be loaded accordingly. Also move its loading to where it
architecurally belongs.
This is XSA-192.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
}
static int hvm_load_segment_selector(
- enum x86_segment seg, uint16_t sel)
+ enum x86_segment seg, uint16_t sel, unsigned int eflags)
{
struct segment_register desctab, cs, segr;
struct desc_struct *pdesc, desc;
u8 dpl, rpl, cpl;
bool_t writable;
int fault_type = TRAP_invalid_tss;
- struct cpu_user_regs *regs = guest_cpu_user_regs();
struct vcpu *v = current;
- if ( regs->eflags & X86_EFLAGS_VM )
+ if ( eflags & X86_EFLAGS_VM )
{
segr.sel = sel;
segr.base = (uint32_t)sel << 4;
@@ -2986,6 +2985,8 @@ void hvm_task_switch(
if ( rc != HVMCOPY_okay )
goto out;
+ if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
+ goto out;
if ( hvm_set_cr3(tss.cr3, 1) )
goto out;
@@ -3008,13 +3009,12 @@ void hvm_task_switch(
}
exn_raised = 0;
- if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
- hvm_load_segment_selector(x86_seg_es, tss.es) ||
- hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
- hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
- hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
- hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
- hvm_load_segment_selector(x86_seg_gs, tss.gs) )
+ if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
+ hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
+ hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
+ hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
+ hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
+ hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
exn_raised = 1;
rc = hvm_copy_to_guest_virt(


@@ -1,68 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/PV: writes of %fs and %gs base MSRs require canonical addresses
Commit c42494acb2 ("x86: fix FS/GS base handling when using the
fsgsbase feature") replaced the use of wrmsr_safe() on these paths
without recognizing that wr{f,g}sbase() use just wrmsrl() and that the
WR{F,G}SBASE instructions also raise #GP for non-canonical input.
Similarly arch_set_info_guest() needs to prevent non-canonical
addresses from getting stored into state later to be loaded by context
switch code. For consistency also check stack pointers and LDT base.
DR0..3, otoh, already get properly checked in set_debugreg() (albeit
we discard the error there).
The SHADOW_GS_BASE check isn't strictly necessary, but I think we
better avoid trying the WRMSR if we know it's going to fail.
This is XSA-193.
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -890,7 +890,13 @@ int arch_set_info_guest(
{
if ( !compat )
{
- if ( !is_canonical_address(c.nat->user_regs.eip) ||
+ if ( !is_canonical_address(c.nat->user_regs.rip) ||
+ !is_canonical_address(c.nat->user_regs.rsp) ||
+ !is_canonical_address(c.nat->kernel_sp) ||
+ (c.nat->ldt_ents && !is_canonical_address(c.nat->ldt_base)) ||
+ !is_canonical_address(c.nat->fs_base) ||
+ !is_canonical_address(c.nat->gs_base_kernel) ||
+ !is_canonical_address(c.nat->gs_base_user) ||
!is_canonical_address(c.nat->event_callback_eip) ||
!is_canonical_address(c.nat->syscall_callback_eip) ||
!is_canonical_address(c.nat->failsafe_callback_eip) )
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2723,19 +2723,22 @@ static int emulate_privileged_op(struct
switch ( regs->_ecx )
{
case MSR_FS_BASE:
- if ( is_pv_32bit_domain(currd) )
+ if ( is_pv_32bit_domain(currd) ||
+ !is_canonical_address(msr_content) )
goto fail;
wrfsbase(msr_content);
v->arch.pv_vcpu.fs_base = msr_content;
break;
case MSR_GS_BASE:
- if ( is_pv_32bit_domain(currd) )
+ if ( is_pv_32bit_domain(currd) ||
+ !is_canonical_address(msr_content) )
goto fail;
wrgsbase(msr_content);
v->arch.pv_vcpu.gs_base_kernel = msr_content;
break;
case MSR_SHADOW_GS_BASE:
- if ( is_pv_32bit_domain(currd) )
+ if ( is_pv_32bit_domain(currd) ||
+ !is_canonical_address(msr_content) )
goto fail;
if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
goto fail;
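
"Canonical" here means that bits 63:47 of the virtual address are all copies of bit 47, i.e. sign-extending the low 48 bits reproduces the value. A tiny sketch of such a check (not Xen's exact macro, but the property the added is_canonical_address() calls enforce before a guest-supplied value can reach WRFSBASE/WRGSBASE or be loaded later by the context switch code):

#include <stdint.h>
#include <stdio.h>

static int is_canonical_address(uint64_t addr)
{
    /* Relies on arithmetic right shift of signed values, as Xen's code does. */
    return (int64_t)(addr << 16) >> 16 == (int64_t)addr;
}

int main(void)
{
    printf("%d\n", is_canonical_address(0x00007fffffffffffULL)); /* 1: user half   */
    printf("%d\n", is_canonical_address(0xffff800000000000ULL)); /* 1: kernel half */
    printf("%d\n", is_canonical_address(0x0000800000000000ULL)); /* 0: hole        */
    return 0;
}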


@@ -1,144 +0,0 @@
From 71096b016f7fd54a72af73576948cb25cf42ebcb Mon Sep 17 00:00:00 2001
From: Roger Pau Monné <roger.pau@citrix.com>
Date: Wed, 2 Nov 2016 15:02:00 +0000
Subject: [PATCH] libelf: fix stack memory leak when loading 32 bit symbol
tables
The 32 bit Elf structs are smaller than the 64 bit ones, which means that
when loading them there's some padding left uninitialized at the end of each
struct (because the size indicated in e_ehsize and e_shentsize is
smaller than the size of elf_ehdr and elf_shdr).
Fix this by introducing a new helper that is used to set
[caller_]xdest_{base/size} and that takes care of performing the appropriate
memset of the region. This newly introduced helper is then used to set and
unset xdest_{base/size} in elf_load_bsdsyms. Now that the full struct
is zeroed, there's no need to specifically zero the undefined section.
This is XSA-194.
Suggested-by: Ian Jackson <ian.jackson@eu.citrix.com>
Also remove the open coded (and redundant with the earlier
elf_memset_unchecked()) use of caller_xdest_* from elf_init().
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
xen/common/libelf/libelf-loader.c | 14 +++-----------
xen/common/libelf/libelf-tools.c | 11 +++++++++--
xen/include/xen/libelf.h | 15 +++++++++------
3 files changed, 21 insertions(+), 19 deletions(-)
diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c
index 4d3ae4d..bc1f87b 100644
--- a/xen/common/libelf/libelf-loader.c
+++ b/xen/common/libelf/libelf-loader.c
@@ -43,8 +43,6 @@ elf_errorstatus elf_init(struct elf_binary *elf, const char *image_input, size_t
elf->ehdr = ELF_MAKE_HANDLE(elf_ehdr, (elf_ptrval)image_input);
elf->class = elf_uval_3264(elf, elf->ehdr, e32.e_ident[EI_CLASS]);
elf->data = elf_uval_3264(elf, elf->ehdr, e32.e_ident[EI_DATA]);
- elf->caller_xdest_base = NULL;
- elf->caller_xdest_size = 0;
/* Sanity check phdr. */
offset = elf_uval(elf, elf->ehdr, e_phoff) +
@@ -284,9 +282,8 @@ do { \
#define SYMTAB_INDEX 1
#define STRTAB_INDEX 2
- /* Allow elf_memcpy_safe to write to symbol_header. */
- elf->caller_xdest_base = &header;
- elf->caller_xdest_size = sizeof(header);
+ /* Allow elf_memcpy_safe to write to header. */
+ elf_set_xdest(elf, &header, sizeof(header));
/*
* Calculate the position of the various elements in GUEST MEMORY SPACE.
@@ -319,11 +316,7 @@ do { \
elf_store_field_bitness(elf, header_handle, e_phentsize, 0);
elf_store_field_bitness(elf, header_handle, e_phnum, 0);
- /* Zero the undefined section. */
- section_handle = ELF_MAKE_HANDLE(elf_shdr,
- ELF_REALPTR2PTRVAL(&header.elf_header.section[SHN_UNDEF]));
shdr_size = elf_uval(elf, elf->ehdr, e_shentsize);
- elf_memset_safe(elf, ELF_HANDLE_PTRVAL(section_handle), 0, shdr_size);
/*
* The symtab section header is going to reside in section[SYMTAB_INDEX],
@@ -404,8 +397,7 @@ do { \
}
/* Remove permissions from elf_memcpy_safe. */
- elf->caller_xdest_base = NULL;
- elf->caller_xdest_size = 0;
+ elf_set_xdest(elf, NULL, 0);
#undef SYMTAB_INDEX
#undef STRTAB_INDEX
diff --git a/xen/common/libelf/libelf-tools.c b/xen/common/libelf/libelf-tools.c
index 5a4757b..e73e729 100644
--- a/xen/common/libelf/libelf-tools.c
+++ b/xen/common/libelf/libelf-tools.c
@@ -59,8 +59,7 @@ bool elf_access_ok(struct elf_binary * elf,
return 1;
if ( elf_ptrval_in_range(ptrval, size, elf->dest_base, elf->dest_size) )
return 1;
- if ( elf_ptrval_in_range(ptrval, size,
- elf->caller_xdest_base, elf->caller_xdest_size) )
+ if ( elf_ptrval_in_range(ptrval, size, elf->xdest_base, elf->xdest_size) )
return 1;
elf_mark_broken(elf, "out of range access");
return 0;
@@ -373,6 +372,14 @@ bool elf_phdr_is_loadable(struct elf_binary *elf, ELF_HANDLE_DECL(elf_phdr) phdr
return ((p_type == PT_LOAD) && (p_flags & (PF_R | PF_W | PF_X)) != 0);
}
+void elf_set_xdest(struct elf_binary *elf, void *addr, uint64_t size)
+{
+ elf->xdest_base = addr;
+ elf->xdest_size = size;
+ if ( addr != NULL )
+ elf_memset_safe(elf, ELF_REALPTR2PTRVAL(addr), 0, size);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/include/xen/libelf.h b/xen/include/xen/libelf.h
index 95b5370..cf62bc7 100644
--- a/xen/include/xen/libelf.h
+++ b/xen/include/xen/libelf.h
@@ -210,13 +210,11 @@ struct elf_binary {
uint64_t bsd_symtab_pend;
/*
- * caller's other acceptable destination
- *
- * Again, these are trusted and must be valid (or 0) so long
- * as the struct elf_binary is in use.
+ * caller's other acceptable destination.
+ * Set by elf_set_xdest. Do not set these directly.
*/
- void *caller_xdest_base;
- uint64_t caller_xdest_size;
+ void *xdest_base;
+ uint64_t xdest_size;
#ifndef __XEN__
/* misc */
@@ -494,5 +492,10 @@ static inline void ELF_ADVANCE_DEST(struct elf_binary *elf, uint64_t amount)
}
}
+/* Specify a (single) additional destination, to which the image may
+ * cause writes. As with dest_base and dest_size, the values provided
+ * are trusted and must be valid so long as the struct elf_binary
+ * is in use or until elf_set_xdest(,0,0) is called. */
+void elf_set_xdest(struct elf_binary *elf, void *addr, uint64_t size);
#endif /* __XEN_LIBELF_H__ */
--
2.1.4
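
The leak is easy to reproduce outside libelf: copying a smaller 32-bit on-disk structure into a larger in-memory union only writes the first part of the destination, so whatever was on the stack behind it survives unless the whole region is zeroed first, which is what the new elf_set_xdest() helper guarantees. A toy illustration (struct sizes are invented; the real code deals with the Elf32/Elf64 header pairs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t fields[10]; } shdr32;    /* 40 bytes on disk   */
typedef struct { uint64_t fields[8];  } shdr64;    /* 64 bytes in memory */
typedef union  { shdr32 e32; shdr64 e64; } shdr;

int main(void)
{
    shdr dst;
    shdr32 src = { { 0 } };

    /* Like elf_set_xdest(): zero the whole destination region up front so
     * the 24 bytes the 32-bit copy never touches cannot leak stack data. */
    memset(&dst, 0, sizeof(dst));
    memcpy(&dst, &src, sizeof(src));    /* only sizeof(shdr32) is written */

    printf("bytes written by the copy: %zu of %zu\n", sizeof(src), sizeof(dst));
    return 0;
}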


@@ -1,45 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86emul: fix huge bit offset handling
We must never chop off the high 32 bits.
This is XSA-195.
Reported-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -2549,6 +2549,12 @@ x86_emulate(
else
{
/*
+ * Instructions such as bt can reference an arbitrary offset from
+ * their memory operand, but the instruction doing the actual
+ * emulation needs the appropriate op_bytes read from memory.
+ * Adjust both the source register and memory operand to make an
+ * equivalent instruction.
+ *
* EA += BitOffset DIV op_bytes*8
* BitOffset = BitOffset MOD op_bytes*8
* DIV truncates towards negative infinity.
@@ -2560,14 +2566,15 @@ x86_emulate(
src.val = (int32_t)src.val;
if ( (long)src.val < 0 )
{
- unsigned long byte_offset;
- byte_offset = op_bytes + (((-src.val-1) >> 3) & ~(op_bytes-1));
+ unsigned long byte_offset =
+ op_bytes + (((-src.val - 1) >> 3) & ~(op_bytes - 1L));
+
ea.mem.off -= byte_offset;
src.val = (byte_offset << 3) + src.val;
}
else
{
- ea.mem.off += (src.val >> 3) & ~(op_bytes - 1);
+ ea.mem.off += (src.val >> 3) & ~(op_bytes - 1L);
src.val &= (op_bytes << 3) - 1;
}
}
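
The one-character fix above ("1" to "1L") matters because of C's integer promotions: with a 32-bit unsigned op_bytes the complemented mask is only 32 bits wide. A minimal demonstration, assuming an LP64 build (as x86-64 Xen is) and a made-up guest-supplied bit offset:

#include <stdio.h>

int main(void)
{
    unsigned int op_bytes = 8;
    long bit_off = 1L << 40;          /* huge bit offset from the guest */

    /* Buggy mask: ~(op_bytes - 1) is evaluated as a 32-bit unsigned int
     * (0xfffffff8) and only then widened to long as 0x00000000fffffff8,
     * chopping off the high 32 bits of the displacement. */
    long bad  = (bit_off >> 3) & ~(op_bytes - 1);

    /* Fixed mask: the 1L forces the arithmetic to long, the mask becomes
     * 0xfffffffffffffff8 and the high bits survive. */
    long good = (bit_off >> 3) & ~(op_bytes - 1L);

    printf("bad  = %#lx\n", (unsigned long)bad);    /* 0            */
    printf("good = %#lx\n", (unsigned long)good);   /* 0x2000000000 */
    return 0;
}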


@@ -1,55 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86emul: CMPXCHG8B ignores operand size prefix
Otherwise besides mis-handling the instruction, the comparison failure
case would result in uninitialized stack data being handed back to the
guest in rDX:rAX (32 bits leaked for 32-bit guests, 96 bits for 64-bit
ones).
This is XSA-200.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -435,6 +435,24 @@ int main(int argc, char **argv)
goto fail;
printf("okay\n");
+ printf("%-40s", "Testing cmpxchg8b (%edi) [opsize]...");
+ instr[0] = 0x66; instr[1] = 0x0f; instr[2] = 0xc7; instr[3] = 0x0f;
+ res[0] = 0x12345678;
+ res[1] = 0x87654321;
+ regs.eflags = 0x200;
+ regs.eip = (unsigned long)&instr[0];
+ regs.edi = (unsigned long)res;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (res[0] != 0x12345678) ||
+ (res[1] != 0x87654321) ||
+ (regs.eax != 0x12345678) ||
+ (regs.edx != 0x87654321) ||
+ ((regs.eflags&0x240) != 0x200) ||
+ (regs.eip != (unsigned long)&instr[4]) )
+ goto fail;
+ printf("okay\n");
+
printf("%-40s", "Testing movsxbd (%%eax),%%ecx...");
instr[0] = 0x0f; instr[1] = 0xbe; instr[2] = 0x08;
regs.eflags = 0x200;
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4775,8 +4775,12 @@ x86_emulate(
generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
if ( op_bytes == 8 )
+ {
host_and_vcpu_must_have(cx16);
- op_bytes *= 2;
+ op_bytes = 16;
+ }
+ else
+ op_bytes = 8;
/* Get actual old value. */
if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes,


@@ -1,75 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: x86: force EFLAGS.IF on when exiting to PV guests
Guest kernels modifying instructions in the process of being emulated
for another of their vCPUs may cause EFLAGS.IF to be cleared upon
next exiting to guest context, by converting the instruction being
emulated to CLI (at the right point in time). Prevent any such bad
effects by always forcing EFLAGS.IF on. And to cover hypothetical other
similar issues, also force EFLAGS.{IOPL,NT,VM} to zero.
This is XSA-202.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -109,6 +109,8 @@ compat_process_trap:
/* %rbx: struct vcpu, interrupts disabled */
ENTRY(compat_restore_all_guest)
ASSERT_INTERRUPTS_DISABLED
+ mov $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11d
+ and UREGS_eflags(%rsp),%r11d
.Lcr4_orig:
.skip .Lcr4_alt_end - .Lcr4_alt, 0x90
.Lcr4_orig_end:
@@ -144,6 +146,8 @@ ENTRY(compat_restore_all_guest)
(.Lcr4_orig_end - .Lcr4_orig), \
(.Lcr4_alt_end - .Lcr4_alt)
.popsection
+ or $X86_EFLAGS_IF,%r11
+ mov %r11d,UREGS_eflags(%rsp)
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -40,28 +40,29 @@ restore_all_guest:
testw $TRAP_syscall,4(%rsp)
jz iret_exit_to_guest
+ movq 24(%rsp),%r11 # RFLAGS
+ andq $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),%r11
+ orq $X86_EFLAGS_IF,%r11
+
/* Don't use SYSRET path if the return address is not canonical. */
movq 8(%rsp),%rcx
sarq $47,%rcx
incl %ecx
cmpl $1,%ecx
- ja .Lforce_iret
+ movq 8(%rsp),%rcx # RIP
+ ja iret_exit_to_guest
cmpw $FLAT_USER_CS32,16(%rsp)# CS
- movq 8(%rsp),%rcx # RIP
- movq 24(%rsp),%r11 # RFLAGS
movq 32(%rsp),%rsp # RSP
je 1f
sysretq
1: sysretl
-.Lforce_iret:
- /* Mimic SYSRET behavior. */
- movq 8(%rsp),%rcx # RIP
- movq 24(%rsp),%r11 # RFLAGS
ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
+ andl $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
+ orl $X86_EFLAGS_IF,24(%rsp)
addq $8,%rsp
.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
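
What both the compat and native exit paths now do amounts to a one-line flag fixup. A small C rendering of it (the flag constants are spelled out here; the demo value is invented):

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_IF   (1u << 9)
#define X86_EFLAGS_IOPL (3u << 12)
#define X86_EFLAGS_NT   (1u << 14)
#define X86_EFLAGS_VM   (1u << 17)

/* Drop the bits a PV guest must never control and force interrupts back
 * on, so that an instruction rewritten into CLI mid-emulation cannot cause
 * the hypervisor to exit to the guest with EFLAGS.IF clear. */
static uint32_t sanitize_guest_eflags(uint32_t eflags)
{
    eflags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_VM);
    eflags |= X86_EFLAGS_IF;
    return eflags;
}

int main(void)
{
    uint32_t hostile = 0x00023002;    /* IOPL=3, VM=1, IF=0 */
    printf("%#x -> %#x\n", hostile, sanitize_guest_eflags(hostile));   /* 0x202 */
    return 0;
}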


@@ -1,69 +0,0 @@
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Sun, 18 Dec 2016 15:42:59 +0000
Subject: [PATCH] x86/emul: Correct the handling of eflags with SYSCALL
A singlestep #DB is determined by the resulting eflags value from the
execution of SYSCALL, not the original eflags value.
By using the original eflags value, we negate the guest kernel's attempt to
protect itself from a privilege escalation by masking TF.
Introduce a tf boolean and have the SYSCALL emulation recalculate it
after the instruction is complete.
This is XSA-204
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/arch/x86/x86_emulate/x86_emulate.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index bca7045..abe442e 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1582,6 +1582,7 @@ x86_emulate(
union vex vex = {};
unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
bool_t lock_prefix = 0;
+ bool_t tf = !!(ctxt->regs->eflags & EFLG_TF);
int override_seg = -1, rc = X86EMUL_OKAY;
struct operand src = { .reg = REG_POISON };
struct operand dst = { .reg = REG_POISON };
@@ -3910,9 +3911,8 @@ x86_emulate(
}
no_writeback:
- /* Inject #DB if single-step tracing was enabled at instruction start. */
- if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
- (ops->inject_hw_exception != NULL) )
+ /* Should a singlestep #DB be raised? */
+ if ( tf && (rc == X86EMUL_OKAY) && (ops->inject_hw_exception != NULL) )
rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
/* Commit shadow register state. */
@@ -4143,6 +4143,23 @@ x86_emulate(
(rc = ops->write_segment(x86_seg_ss, &ss, ctxt)) )
goto done;
+ /*
+ * SYSCALL (unlike most instructions) evaluates its singlestep action
+ * based on the resulting EFLG_TF, not the starting EFLG_TF.
+ *
+ * As the #DB is raised after the CPL change and before the OS can
+ * switch stack, it is a large risk for privilege escalation.
+ *
+ * 64bit kernels should mask EFLG_TF in MSR_FMASK to avoid any
+ * vulnerability. Running the #DB handler on an IST stack is also a
+ * mitigation.
+ *
+ * 32bit kernels have no ability to mask EFLG_TF at all. Their only
+ * mitigation is to use a task gate for handling #DB (or to not use
+ * enable EFER.SCE to start with).
+ */
+ tf = !!(_regs.eflags & EFLG_TF);
+
break;
}


@@ -2,8 +2,8 @@
PORTNAME= xen
PKGNAMESUFFIX= -tools
PORTVERSION= 4.7.1
PORTREVISION= 5
PORTVERSION= 4.7.2
PORTREVISION= 0
CATEGORIES= sysutils emulators
MASTER_SITES= http://downloads.xenproject.org/release/xen/${PORTVERSION}/
@@ -44,10 +44,10 @@ QEMU_ARGS= --disable-gtk \
--cxx=c++
EXTRA_PATCHES= ${FILESDIR}/var_paths.patch:-p1 \
${FILESDIR}/xsa198.patch:-p1 \
${FILESDIR}/0001-libxl-fix-creation-of-pkgconf-install-dir.patch:-p1 \
${FILESDIR}/0001-tools-configure-fix-pkg-config-install-path-for-Free.patch:-p1 \
${FILESDIR}/0001-libs-xenstore-set-correct-FreeBSD-device.patch:-p1
${FILESDIR}/0001-libs-xenstore-set-correct-FreeBSD-device.patch:-p1 \
${FILESDIR}/kdd.patch:-p1
CONFIGURE_ARGS+= --with-extra-qemuu-configure-args="${QEMU_ARGS}" \
--with-system-seabios=${LOCALBASE}/share/seabios/bios.bin


@@ -1,3 +1,3 @@
TIMESTAMP = 1480690512
SHA256 (xen-4.7.1.tar.gz) = e87f4b0575e78657ee23d31470a15ecf1ce8c3a92a771cda46bbcd4d0d671ffe
SIZE (xen-4.7.1.tar.gz) = 20706864
TIMESTAMP = 1489085975
SHA256 (xen-4.7.2.tar.gz) = 61494a56d9251e2108080f95b0dc8e3d175f1ba4da34603fc07b91cfebf358d5
SIZE (xen-4.7.2.tar.gz) = 20714281


@@ -0,0 +1,36 @@
Subject: [PATCH] tools/kdd: don't use a pointer to an unaligned field.
The 'val' field in the packet is byte-aligned (because it is part of a
packed struct), but the pointer argument to kdd_rdmsr() has the normal
alignment constraints for a uint64_t *. Use a local variable to make sure
the passed pointer has the correct alignment.
Reported-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Tim Deegan <tim@xen.org>
---
tools/debugger/kdd/kdd.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tools/debugger/kdd/kdd.c b/tools/debugger/kdd/kdd.c
index 70f007e..1bd5dd5 100644
--- a/tools/debugger/kdd/kdd.c
+++ b/tools/debugger/kdd/kdd.c
@@ -710,11 +710,13 @@ static void kdd_handle_read_ctrl(kdd_state *s)
static void kdd_handle_read_msr(kdd_state *s)
{
uint32_t msr = s->rxp.cmd.msr.msr;
+ uint64_t val;
int ok;
KDD_LOG(s, "Read MSR 0x%"PRIx32"\n", msr);
- ok = (kdd_rdmsr(s->guest, s->cpuid, msr, &s->txp.cmd.msr.val) == 0);
+ ok = (kdd_rdmsr(s->guest, s->cpuid, msr, &val) == 0);
s->txp.cmd.msr.msr = msr;
+ s->txp.cmd.msr.val = val;
s->txp.cmd.msr.status = (ok ? KDD_STATUS_SUCCESS : KDD_STATUS_FAILURE);
kdd_send_cmd(s, KDD_CMD_READ_MSR, 0);
}
--
2.7.4
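
The fix is the usual pattern for packed structures: never hand out the address of a potentially misaligned member to code expecting a normally aligned pointer; go through an aligned local instead and copy the result back. A self-contained sketch (the struct layout and values are invented, __attribute__((packed)) mirrors the packing in the real packet definition, and the stub below stands in for kdd_rdmsr()):

#include <stdint.h>
#include <stdio.h>

/* Toy packet: packing leaves 'val' only byte-aligned, so &cmd.val is not a
 * valid uint64_t * for functions that assume natural alignment. */
struct msr_cmd {
    uint8_t  pad[3];
    uint32_t msr;
    uint64_t val;
} __attribute__((packed));

/* Stand-in for kdd_rdmsr(): writes through a properly aligned pointer. */
static int read_msr(uint32_t msr, uint64_t *out)
{
    *out = 0x1234000000000000ULL | msr;
    return 0;
}

int main(void)
{
    struct msr_cmd cmd = { .msr = 0xc0000100 };
    uint64_t val;                      /* local with full uint64_t alignment */

    read_msr(cmd.msr, &val);           /* not &cmd.val, which is misaligned */
    cmd.val = val;                     /* the compiler emits a safe packed store */
    printf("msr %#x -> val %#llx\n", cmd.msr, (unsigned long long)cmd.val);
    return 0;
}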


@@ -1,63 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: xen: fix ioreq handling
Avoid double fetches and bounds check size to avoid overflowing
internal variables.
This is XSA-197.
Reported-by: yanghongke <yanghongke@huawei.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
--- a/xen-hvm.c
+++ b/xen-hvm.c
@@ -810,6 +810,10 @@ static void cpu_ioreq_pio(ioreq_t *req)
trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
req->data, req->count, req->size);
+ if (req->size > sizeof(uint32_t)) {
+ hw_error("PIO: bad size (%u)", req->size);
+ }
+
if (req->dir == IOREQ_READ) {
if (!req->data_is_ptr) {
req->data = do_inp(req->addr, req->size);
@@ -846,6 +850,10 @@ static void cpu_ioreq_move(ioreq_t *req)
trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
req->data, req->count, req->size);
+ if (req->size > sizeof(req->data)) {
+ hw_error("MMIO: bad size (%u)", req->size);
+ }
+
if (!req->data_is_ptr) {
if (req->dir == IOREQ_READ) {
for (i = 0; i < req->count; i++) {
@@ -1010,11 +1018,13 @@ static int handle_buffered_iopage(XenIOS
req.df = 1;
req.type = buf_req->type;
req.data_is_ptr = 0;
+ xen_rmb();
qw = (req.size == 8);
if (qw) {
buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
IOREQ_BUFFER_SLOT_NUM];
req.data |= ((uint64_t)buf_req->data) << 32;
+ xen_rmb();
}
handle_ioreq(state, &req);
@@ -1045,7 +1055,11 @@ static void cpu_handle_ioreq(void *opaqu
handle_buffered_iopage(state);
if (req) {
- handle_ioreq(state, req);
+ ioreq_t copy = *req;
+
+ xen_rmb();
+ handle_ioreq(state, &copy);
+ req->data = copy.data;
if (req->state != STATE_IOREQ_INPROCESS) {
fprintf(stderr, "Badness in I/O request ... not in service?!: "
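
The shape of this fix is the standard anti-double-fetch pattern: take one snapshot of the guest-writable structure, fence, validate and use only the snapshot, and write back nothing but the result. A simplified sketch (the struct below is a stand-in for ioreq_t, and __atomic_thread_fence plays the role of xen_rmb()):

#include <string.h>

/* Stand-in for the shared-page request; the real ioreq_t has more fields. */
typedef struct {
    unsigned long addr;
    unsigned long data;
    unsigned int  size;
    unsigned char dir;
    unsigned char state;
} ioreq;

static void handle(volatile ioreq *shared)
{
    ioreq copy;

    /* One snapshot of everything the guest can scribble on... */
    memcpy(&copy, (const void *)shared, sizeof(copy));
    __atomic_thread_fence(__ATOMIC_ACQUIRE);

    /* ...bounds-check the snapshot, never the live structure... */
    if (copy.size > sizeof(copy.data))
        return;

    /* ...emulate the access using 'copy' only, then publish just the result. */
    shared->data = copy.data;
}

int main(void)
{
    ioreq req = { .addr = 0xcf8, .size = 4, .dir = 0, .state = 1 };

    handle(&req);
    return 0;
}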


@@ -1,62 +0,0 @@
From 71a389ae940bc52bf897a6e5becd73fd8ede94c5 Mon Sep 17 00:00:00 2001
From: Ian Jackson <ian.jackson@eu.citrix.com>
Date: Thu, 3 Nov 2016 16:37:40 +0000
Subject: [PATCH] pygrub: Properly quote results, when returning them to the
caller:
* When the caller wants sexpr output, use `repr()'
This is what Xend expects.
The returned S-expressions are now escaped and quoted by Python,
generally using '...'. Previously kernel and ramdisk were unquoted
and args was quoted with "..." but without proper escaping. This
change may break toolstacks which do not properly dequote the
returned S-expressions.
* When the caller wants "simple" output, crash if the delimiter is
contained in the returned value.
With --output-format=simple it does not seem like this could ever
happen, because the bootloader config parsers all take line-based
input from the various bootloader config files.
With --output-format=simple0, this can happen if the bootloader
config file contains nul bytes.
This is XSA-198.
Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
Tested-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
tools/pygrub/src/pygrub | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/tools/pygrub/src/pygrub b/tools/pygrub/src/pygrub
index 40f9584..dd0c8f7 100755
--- a/tools/pygrub/src/pygrub
+++ b/tools/pygrub/src/pygrub
@@ -721,14 +721,17 @@ def sniff_netware(fs, cfg):
return cfg
def format_sxp(kernel, ramdisk, args):
- s = "linux (kernel %s)" % kernel
+ s = "linux (kernel %s)" % repr(kernel)
if ramdisk:
- s += "(ramdisk %s)" % ramdisk
+ s += "(ramdisk %s)" % repr(ramdisk)
if args:
- s += "(args \"%s\")" % args
+ s += "(args %s)" % repr(args)
return s
def format_simple(kernel, ramdisk, args, sep):
+ for check in (kernel, ramdisk, args):
+ if check is not None and sep in check:
+ raise RuntimeError, "simple format cannot represent delimiter-containing value"
s = ("kernel %s" % kernel) + sep
if ramdisk:
s += ("ramdisk %s" % ramdisk) + sep
--
2.1.4


@@ -1,53 +0,0 @@
From 7eaaf4ba68fab40f1945d761438bdaa44fbf37d7 Mon Sep 17 00:00:00 2001
From: Li Qiang <liqiang6-s@360.cn>
Date: Thu, 9 Feb 2017 14:36:41 -0800
Subject: [PATCH] cirrus: fix oob access issue (CVE-2017-2615)
When doing a bitblt copy in backward mode, we should subtract the
blit width first, just as it is added in the forward mode. This
avoids an out-of-bounds access before the start of the VGA's vram.
This is XSA-208.
upstream-commit-id: 62d4c6bd5263bb8413a06c80144fc678df6dfb64
Signed-off-by: Li Qiang <liqiang6-s@360.cn>
[ kraxel: with backward blits (negative pitch) addr is the topmost
address, so check it as-is against vram size ]
Cc: qemu-stable@nongnu.org
Cc: P J P <ppandit@redhat.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Wolfgang Bumiller <w.bumiller@proxmox.com>
Fixes: d3532a0db02296e687711b8cdc7791924efccea0 (CVE-2014-8106)
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Message-id: 1485938101-26602-1-git-send-email-kraxel@redhat.com
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
---
hw/display/cirrus_vga.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 5198037..7bf3707 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -272,10 +272,9 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
{
if (pitch < 0) {
int64_t min = addr
- + ((int64_t)s->cirrus_blt_height-1) * pitch;
- int32_t max = addr
- + s->cirrus_blt_width;
- if (min < 0 || max >= s->vga.vram_size) {
+ + ((int64_t)s->cirrus_blt_height - 1) * pitch
+ - s->cirrus_blt_width;
+ if (min < -1 || addr >= s->vga.vram_size) {
return true;
}
} else {
--
2.10.1 (Apple Git-78)
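
The arithmetic being fixed is small enough to check by hand: with a negative pitch the blit starts at its highest address and walks downwards, so the lowest byte it can touch is addr + (height - 1) * pitch - width, and that is what has to stay inside the VRAM. A standalone sketch of the corrected check (function shape and numbers are illustrative, not QEMU's exact code):

#include <stdint.h>
#include <stdio.h>

static int blit_region_is_unsafe(int64_t addr, int pitch,
                                 int width, int height, int64_t vram_size)
{
    if (pitch < 0) {
        /* Backward blit: addr is the topmost address (checked as-is), and
         * the width is now subtracted when computing the lowest address. */
        int64_t min = addr + (int64_t)(height - 1) * pitch - width;
        return min < -1 || addr >= vram_size;
    }
    /* Forward blit: the last byte touched must not run past the VRAM end. */
    return addr + (int64_t)(height - 1) * pitch + width > vram_size;
}

int main(void)
{
    int64_t vram = 4 * 1024 * 1024;

    printf("in range : %d\n", blit_region_is_unsafe(8192, -1024, 64, 4, vram));
    printf("underflow: %d\n", blit_region_is_unsafe(1024, -1024, 64, 4, vram));
    return 0;
}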


@@ -1,72 +0,0 @@
From 52b7f43c8fa185ab856bcaacda7abc9a6fc07f84 Mon Sep 17 00:00:00 2001
From: Bruce Rogers <brogers@suse.com>
Date: Tue, 21 Feb 2017 10:54:38 -0800
Subject: [PATCH] display: cirrus: ignore source pitch value as needed in
blit_is_unsafe
Commit 4299b90 added a check which is too broad, given that the source
pitch value is not required to be initialized for solid fill operations.
This patch refines the blit_is_unsafe() check to ignore source pitch in
that case. After applying the above commit as a security patch, we
noticed the SLES 11 SP4 guest gui failed to initialize properly.
Signed-off-by: Bruce Rogers <brogers@suse.com>
Message-id: 20170109203520.5619-1-brogers@suse.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
hw/display/cirrus_vga.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 7bf3707..34a6900 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -288,7 +288,7 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
return false;
}
-static bool blit_is_unsafe(struct CirrusVGAState *s)
+static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only)
{
/* should be the case, see cirrus_bitblt_start */
assert(s->cirrus_blt_width > 0);
@@ -302,6 +302,9 @@ static bool blit_is_unsafe(struct CirrusVGAState *s)
s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) {
return true;
}
+ if (dst_only) {
+ return false;
+ }
if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch,
s->cirrus_blt_srcaddr & s->cirrus_addr_mask)) {
return true;
@@ -667,7 +670,7 @@ static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s,
dst = s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask);
- if (blit_is_unsafe(s))
+ if (blit_is_unsafe(s, false))
return 0;
(*s->cirrus_rop) (s, dst, src,
@@ -685,7 +688,7 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop)
{
cirrus_fill_t rop_func;
- if (blit_is_unsafe(s)) {
+ if (blit_is_unsafe(s, true)) {
return 0;
}
rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1];
@@ -784,7 +787,7 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
{
- if (blit_is_unsafe(s))
+ if (blit_is_unsafe(s, false))
return 0;
cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
--
2.10.1 (Apple Git-78)


@@ -1,57 +0,0 @@
From: Gerd Hoffmann <kraxel@redhat.com>
Subject: [PATCH 3/3] cirrus: add blit_is_unsafe call to cirrus_bitblt_cputovideo
CIRRUS_BLTMODE_MEMSYSSRC blits do NOT check blit destination
and blit width, at all. Oops. Fix it.
Security impact: high.
The missing blit destination check allows to write to host memory.
Basically same as CVE-2014-8106 for the other blit variants.
The missing blit width check allows to overflow cirrus_bltbuf,
with the attractive target cirrus_srcptr (current cirrus_bltbuf write
position) being located right after cirrus_bltbuf in CirrusVGAState.
Due to cirrus emulation writing cirrus_bltbuf bytewise the attacker
hasn't full control over cirrus_srcptr though, only one byte can be
changed. Once the first byte has been modified further writes land
elsewhere.
[ This is CVE-2017-2620 / XSA-209 - Ian Jackson ]
Reported-by: Gerd Hoffmann <ghoffman@redhat.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
hw/display/cirrus_vga.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 0e47cf8..a093dc8 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -899,6 +899,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
{
int w;
+ if (blit_is_unsafe(s, true)) {
+ return 0;
+ }
+
s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC;
s->cirrus_srcptr = &s->cirrus_bltbuf[0];
s->cirrus_srcptr_end = &s->cirrus_bltbuf[0];
@@ -924,6 +928,10 @@ static int cirrus_bitblt_cputovideo(CirrusVGAState * s)
}
s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height;
}
+
+ /* the blit_is_unsafe call above should catch this */
+ assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE);
+
s->cirrus_srcptr = s->cirrus_bltbuf;
s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch;
cirrus_update_memory_access(s);
--
1.8.3.1


@@ -0,0 +1,259 @@
From 9de536fbc2be97ae887560f08f0fd824efa3d5db Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann <kraxel@redhat.com>
Date: Tue, 14 Feb 2017 19:09:59 +0100
Subject: [PATCH] cirrus/vnc: zap bitblit support from console code.
There is a special code path (dpy_gfx_copy) that allows graphics emulation
to notify user interface code about bitblit operations carried out by
guests. It is supported by cirrus and the vnc server. The intended purpose
is to optimize display scrolls and just send over the scroll op instead
of a full display update.
This is rarely used these days though because modern guests simply don't
use the cirrus blitter any more. Any linux guest using the cirrus drm
driver doesn't. Any windows guest newer than winxp doesn't ship with a
cirrus driver any more and thus uses the cirrus as simple framebuffer.
So this code tends to bitrot and bugs can go unnoticed for a long time.
See for example commit "3e10c3e vnc: fix qemu crash because of SIGSEGV"
which fixes a bug lingering in the code for almost a year, added by
commit "c7628bf vnc: only alloc server surface with clients connected".
Also the vnc server will throttle the frame rate in case it figures the
network can't keep up (send buffers are full). This doesn't work with
dpy_gfx_copy, for any copy operation sent to the vnc client we have to
send all outstanding updates beforehand, otherwise the vnc client might
run the client side blit on outdated data and thereby corrupt the
display. So this dpy_gfx_copy "optimization" might even make things
worse on slow network links.
Let's kill it once and for all.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
hw/display/cirrus_vga.c | 12 ++-----
include/ui/console.h | 7 ----
ui/console.c | 28 ---------------
ui/vnc.c | 91 -------------------------------------------------
4 files changed, 3 insertions(+), 135 deletions(-)
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 5901250..2841676 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -758,11 +758,6 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
}
}
- /* we have to flush all pending changes so that the copy
- is generated at the appropriate moment in time */
- if (notify)
- graphic_hw_update(s->vga.con);
-
(*s->cirrus_rop) (s, s->vga.vram_ptr +
(s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
s->vga.vram_ptr +
@@ -771,10 +766,9 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
s->cirrus_blt_width, s->cirrus_blt_height);
if (notify) {
- qemu_console_copy(s->vga.con,
- sx, sy, dx, dy,
- s->cirrus_blt_width / depth,
- s->cirrus_blt_height);
+ dpy_gfx_update(s->vga.con, dx, dy,
+ s->cirrus_blt_width / depth,
+ s->cirrus_blt_height);
}
/* we don't have to notify the display that this portion has
diff --git a/include/ui/console.h b/include/ui/console.h
index 047a2b4..ed07065 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -166,9 +166,6 @@ typedef struct DisplayChangeListenerOps {
int x, int y, int w, int h);
void (*dpy_gfx_switch)(DisplayChangeListener *dcl,
struct DisplaySurface *new_surface);
- void (*dpy_gfx_copy)(DisplayChangeListener *dcl,
- int src_x, int src_y,
- int dst_x, int dst_y, int w, int h);
bool (*dpy_gfx_check_format)(DisplayChangeListener *dcl,
pixman_format_code_t format);
@@ -233,8 +230,6 @@ int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info);
void dpy_gfx_update(QemuConsole *con, int x, int y, int w, int h);
void dpy_gfx_replace_surface(QemuConsole *con,
DisplaySurface *surface);
-void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y,
- int dst_x, int dst_y, int w, int h);
void dpy_text_cursor(QemuConsole *con, int x, int y);
void dpy_text_update(QemuConsole *con, int x, int y, int w, int h);
void dpy_text_resize(QemuConsole *con, int w, int h);
@@ -329,8 +324,6 @@ void text_consoles_set_display(DisplayState *ds);
void console_select(unsigned int index);
void console_color_init(DisplayState *ds);
void qemu_console_resize(QemuConsole *con, int width, int height);
-void qemu_console_copy(QemuConsole *con, int src_x, int src_y,
- int dst_x, int dst_y, int w, int h);
DisplaySurface *qemu_console_surface(QemuConsole *con);
/* console-gl.c */
diff --git a/ui/console.c b/ui/console.c
index 75fc492..72d91cb 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -1495,27 +1495,6 @@ static void dpy_refresh(DisplayState *s)
}
}
-void dpy_gfx_copy(QemuConsole *con, int src_x, int src_y,
- int dst_x, int dst_y, int w, int h)
-{
- DisplayState *s = con->ds;
- DisplayChangeListener *dcl;
-
- if (!qemu_console_is_visible(con)) {
- return;
- }
- QLIST_FOREACH(dcl, &s->listeners, next) {
- if (con != (dcl->con ? dcl->con : active_console)) {
- continue;
- }
- if (dcl->ops->dpy_gfx_copy) {
- dcl->ops->dpy_gfx_copy(dcl, src_x, src_y, dst_x, dst_y, w, h);
- } else { /* TODO */
- dcl->ops->dpy_gfx_update(dcl, dst_x, dst_y, w, h);
- }
- }
-}
-
void dpy_text_cursor(QemuConsole *con, int x, int y)
{
DisplayState *s = con->ds;
@@ -1968,13 +1947,6 @@ void qemu_console_resize(QemuConsole *s, int width, int height)
dpy_gfx_replace_surface(s, surface);
}
-void qemu_console_copy(QemuConsole *con, int src_x, int src_y,
- int dst_x, int dst_y, int w, int h)
-{
- assert(con->console_type == GRAPHIC_CONSOLE);
- dpy_gfx_copy(con, src_x, src_y, dst_x, dst_y, w, h);
-}
-
DisplaySurface *qemu_console_surface(QemuConsole *console)
{
return console->surface;
diff --git a/ui/vnc.c b/ui/vnc.c
index 52c6809..61ab611 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -908,96 +908,6 @@ int vnc_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
return n;
}
-static void vnc_copy(VncState *vs, int src_x, int src_y, int dst_x, int dst_y, int w, int h)
-{
- /* send bitblit op to the vnc client */
- vnc_lock_output(vs);
- vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
- vnc_write_u8(vs, 0);
- vnc_write_u16(vs, 1); /* number of rects */
- vnc_framebuffer_update(vs, dst_x, dst_y, w, h, VNC_ENCODING_COPYRECT);
- vnc_write_u16(vs, src_x);
- vnc_write_u16(vs, src_y);
- vnc_unlock_output(vs);
- vnc_flush(vs);
-}
-
-static void vnc_dpy_copy(DisplayChangeListener *dcl,
- int src_x, int src_y,
- int dst_x, int dst_y, int w, int h)
-{
- VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
- VncState *vs, *vn;
- uint8_t *src_row;
- uint8_t *dst_row;
- int i, x, y, pitch, inc, w_lim, s;
- int cmp_bytes;
-
- vnc_refresh_server_surface(vd);
- QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) {
- if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
- vs->force_update = 1;
- vnc_update_client(vs, 1, true);
- /* vs might be free()ed here */
- }
- }
-
- /* do bitblit op on the local surface too */
- pitch = vnc_server_fb_stride(vd);
- src_row = vnc_server_fb_ptr(vd, src_x, src_y);
- dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y);
- y = dst_y;
- inc = 1;
- if (dst_y > src_y) {
- /* copy backwards */
- src_row += pitch * (h-1);
- dst_row += pitch * (h-1);
- pitch = -pitch;
- y = dst_y + h - 1;
- inc = -1;
- }
- w_lim = w - (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT));
- if (w_lim < 0) {
- w_lim = w;
- } else {
- w_lim = w - (w_lim % VNC_DIRTY_PIXELS_PER_BIT);
- }
- for (i = 0; i < h; i++) {
- for (x = 0; x <= w_lim;
- x += s, src_row += cmp_bytes, dst_row += cmp_bytes) {
- if (x == w_lim) {
- if ((s = w - w_lim) == 0)
- break;
- } else if (!x) {
- s = (VNC_DIRTY_PIXELS_PER_BIT -
- (dst_x % VNC_DIRTY_PIXELS_PER_BIT));
- s = MIN(s, w_lim);
- } else {
- s = VNC_DIRTY_PIXELS_PER_BIT;
- }
- cmp_bytes = s * VNC_SERVER_FB_BYTES;
- if (memcmp(src_row, dst_row, cmp_bytes) == 0)
- continue;
- memmove(dst_row, src_row, cmp_bytes);
- QTAILQ_FOREACH(vs, &vd->clients, next) {
- if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
- set_bit(((x + dst_x) / VNC_DIRTY_PIXELS_PER_BIT),
- vs->dirty[y]);
- }
- }
- }
- src_row += pitch - w * VNC_SERVER_FB_BYTES;
- dst_row += pitch - w * VNC_SERVER_FB_BYTES;
- y += inc;
- }
-
- QTAILQ_FOREACH(vs, &vd->clients, next) {
- if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) {
- vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h);
- }
- }
-}
-
static void vnc_mouse_set(DisplayChangeListener *dcl,
int x, int y, int visible)
{
@@ -3131,7 +3041,6 @@ static void vnc_listen_websocket_read(void *opaque)
static const DisplayChangeListenerOps dcl_ops = {
.dpy_name = "vnc",
.dpy_refresh = vnc_refresh,
- .dpy_gfx_copy = vnc_dpy_copy,
.dpy_gfx_update = vnc_dpy_update,
.dpy_gfx_switch = vnc_dpy_switch,
.dpy_gfx_check_format = qemu_pixman_check_format,
--
2.1.4