
xen: update to 4.5.1

Update xen-kernel to 4.5.1 and add patches to allow live migration, save and
restore. Remove qemu-traditional patches (FreeBSD doesn't support
qemu-traditional) and add XSA-142.

Approved by:		bapt
Differential revision:	https://reviews.freebsd.org/D3854
Sponsored by:		Citrix Systems R&D
Roger Pau Monné 2015-10-09 14:09:07 +00:00
parent d68358849f
commit 936db4de55
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=398918
29 changed files with 715 additions and 1115 deletions

View File

@ -2,8 +2,7 @@
PORTNAME= xen
PKGNAMESUFFIX= -kernel
PORTVERSION= 4.5.0
PORTREVISION= 5
PORTVERSION= 4.5.1
CATEGORIES= emulators
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${PORTVERSION}/
@ -24,20 +23,14 @@ PLIST_FILES= /boot/xen \
ALL_TARGET= build
STRIP= #
WRKSRC_SUBDIR= xen
EXTRA_PATCHES= ${FILESDIR}/iommu_share_p2m_table.patch:-p2 \
${FILESDIR}/0001-x86-pvh-disable-posted-interrupts.patch:-p2 \
${FILESDIR}/xsa117.patch:-p2 \
${FILESDIR}/xsa118-4.5-unstable-1.patch:-p2 \
${FILESDIR}/xsa118-4.5-unstable-2.patch:-p2 \
${FILESDIR}/xsa121.patch:-p2 \
${FILESDIR}/xsa122.patch:-p2 \
${FILESDIR}/xsa123.patch:-p2 \
${FILESDIR}/xsa125.patch:-p2 \
${FILESDIR}/xsa127-4.x.patch:-p2 \
${FILESDIR}/xsa132.patch:-p2 \
${FILESDIR}/xsa134.patch:-p2 \
${FILESDIR}/xsa136.patch:-p2 \
${FILESDIR}/0001-libelf-fix-elf_parse_bsdsyms-call.patch:-p2
EXTRA_PATCHES= ${FILESDIR}/0001-introduce-a-helper-to-allocate-non-contiguous-memory.patch:-p2 \
${FILESDIR}/0002-vmap-avoid-hitting-an-ASSERT-with-vfree-NULL.patch:-p2 \
${FILESDIR}/0003-x86-shadow-fix-shadow_track_dirty_vram-to-work-on-hv.patch:-p2 \
${FILESDIR}/0004-x86-hap-make-hap_track_dirty_vram-use-non-contiguous.patch:-p2 \
${FILESDIR}/0005-x86-rework-paging_log_dirty_op-to-work-with-hvm-gues.patch:-p2 \
${FILESDIR}/0006-xen-pvh-enable-mmu_update-hypercall.patch:-p2 \
${FILESDIR}/0007-iommu-fix-usage-of-shared-EPT-IOMMU-page-tables-on-P.patch:-p2 \
${FILESDIR}/0008-xen-arm-mm-Do-not-dump-the-p2m-when-mapping-a-foreig.patch:-p2
.include <bsd.port.options.mk>

View File

@ -1,2 +1,2 @@
SHA256 (xen-4.5.0.tar.gz) = 5bdb40e2b28d2eeb541bd71a9777f40cbe2ae444b987521d33f099541a006f3b
SIZE (xen-4.5.0.tar.gz) = 18404933
SHA256 (xen-4.5.1.tar.gz) = 668c11d4fca67ac44329e369f810356eacd37b28d28fb96e66aac77f3c5e1371
SIZE (xen-4.5.1.tar.gz) = 18410400

View File

@ -0,0 +1,141 @@
From 411801087603a1a070de7abbfa4373afe91ca3f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:31 +0200
Subject: [PATCH 1/8] introduce a helper to allocate non-contiguous memory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The allocator uses independent calls to alloc_domheap_pages in order to get
the desired amount of memory and then maps all the independent physical
addresses into a contiguous virtual address space.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Tested-by: Julien Grall <julien.grall@citrix.com> (ARM)
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/common/vmap.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-arm/mm.h | 2 ++
xen/include/asm-x86/page.h | 2 ++
xen/include/xen/vmap.h | 3 +++
4 files changed, 74 insertions(+)
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index 783cea3..b6827b5 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -215,4 +215,71 @@ void vunmap(const void *va)
#endif
vm_free(va);
}
+
+void *vmalloc(size_t size)
+{
+ unsigned long *mfn;
+ size_t pages, i;
+ struct page_info *pg;
+ void *va;
+
+ ASSERT(size);
+
+ pages = PFN_UP(size);
+ mfn = xmalloc_array(unsigned long, pages);
+ if ( mfn == NULL )
+ return NULL;
+
+ for ( i = 0; i < pages; i++ )
+ {
+ pg = alloc_domheap_page(NULL, 0);
+ if ( pg == NULL )
+ goto error;
+ mfn[i] = page_to_mfn(pg);
+ }
+
+ va = vmap(mfn, pages);
+ if ( va == NULL )
+ goto error;
+
+ xfree(mfn);
+ return va;
+
+ error:
+ while ( i-- )
+ free_domheap_page(mfn_to_page(mfn[i]));
+ xfree(mfn);
+ return NULL;
+}
+
+void *vzalloc(size_t size)
+{
+ void *p = vmalloc(size);
+ int i;
+
+ if ( p == NULL )
+ return NULL;
+
+ for ( i = 0; i < size; i += PAGE_SIZE )
+ clear_page(p + i);
+
+ return p;
+}
+
+void vfree(void *va)
+{
+ unsigned int i, pages = vm_size(va);
+ struct page_info *pg;
+ PAGE_LIST_HEAD(pg_list);
+
+ ASSERT(pages);
+
+ for ( i = 0; i < pages; i++ )
+ page_list_add(vmap_to_page(va + i * PAGE_SIZE), &pg_list);
+
+ vunmap(va);
+
+ while ( (pg = page_list_remove_head(&pg_list)) != NULL )
+ free_domheap_page(pg);
+}
#endif
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index d25e485..c0afcec 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -208,6 +208,8 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
+#define vmap_to_mfn(va) paddr_to_pfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
/* Page-align address and convert to frame number format */
#define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr))
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index ccf0752..27c2ae7 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -262,6 +262,8 @@ void copy_page_sse2(void *, const void *);
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
+#define vmap_to_mfn(va) l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
#endif /* !defined(__ASSEMBLY__) */
diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
index b1923dd..a13591d 100644
--- a/xen/include/xen/vmap.h
+++ b/xen/include/xen/vmap.h
@@ -11,6 +11,9 @@ void *__vmap(const unsigned long *mfn, unsigned int granularity,
unsigned int nr, unsigned int align, unsigned int flags);
void *vmap(const unsigned long *mfn, unsigned int nr);
void vunmap(const void *);
+void *vmalloc(size_t size);
+void *vzalloc(size_t size);
+void vfree(void *va);
void __iomem *ioremap(paddr_t, size_t);
--
1.9.5 (Apple Git-50.3)
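
The new interface is consumed by later patches in this series (patch 4 switches hap_track_dirty_vram() to it). A minimal usage fragment follows; the buffer name, its size and the error handling are illustrative only:

    /* Illustrative use of the new helpers: a zeroed, page-granular scratch
     * buffer backed by non-contiguous domheap pages. */
    uint8_t *dirty_bitmap = vzalloc(size);   /* size in bytes, rounded up to pages */

    if ( dirty_bitmap == NULL )
        return -ENOMEM;

    /* ... fill and consume dirty_bitmap ... */

    vfree(dirty_bitmap);   /* unmaps the VA range and frees the pages */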

View File

@ -1,36 +0,0 @@
From c2da83662498a5cd66512c684a0af178228f9d5a Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Thu, 11 Jun 2015 17:08:26 +0200
Subject: [PATCH 1/2] libelf: fix elf_parse_bsdsyms call
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
elf_parse_bsdsyms expects the second parameter to be a physical address, not
a virtual one.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Tim Deegan <tim@xen.org>
---
xen/common/libelf/libelf-dominfo.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/xen/common/libelf/libelf-dominfo.c b/xen/common/libelf/libelf-dominfo.c
index 6120dd4..86403b9 100644
--- a/xen/common/libelf/libelf-dominfo.c
+++ b/xen/common/libelf/libelf-dominfo.c
@@ -438,7 +438,7 @@ static elf_errorstatus elf_xen_addr_calc_check(struct elf_binary *elf,
if ( parms->bsd_symtab )
{
- elf_parse_bsdsyms(elf, parms->virt_kend);
+ elf_parse_bsdsyms(elf, elf->pend);
if ( elf->bsd_symtab_pend )
parms->virt_kend = elf->bsd_symtab_pend + parms->virt_offset;
}
--
1.9.5 (Apple Git-50.3)

View File

@ -1,41 +0,0 @@
From 29debf629b50536343eaa9d6c0779f63721f6f59 Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Thu, 21 May 2015 14:12:46 +0200
Subject: [PATCH] x86/pvh: disable posted interrupts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Enabling posted interrupts requires the virtual interrupt delivery feature,
which is disabled for PVH guests, so make sure posted interrupts are also
disabled or else vmlaunch will fail.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-and-Tested-by: Lars Eggert <lars@netapp.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Eddie Dong <eddie.dong@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/vmx/vmcs.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a714549..9827a8e 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -980,6 +980,10 @@ static int construct_vmcs(struct vcpu *v)
v->arch.hvm_vmx.secondary_exec_control &=
~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ /* Disable posted interrupts */
+ __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
+ vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
+
/* Start in 64-bit mode. PVH 32bitfixme. */
vmentry_ctl |= VM_ENTRY_IA32E_MODE; /* GUEST_EFER.LME/LMA ignored */
--
1.9.5 (Apple Git-50.3)

View File

@ -0,0 +1,43 @@
From ed4a7917a6faa8b7e8f211eaeda270f96e45de7d Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Fri, 9 Oct 2015 12:57:31 +0200
Subject: [PATCH 2/8] vmap: avoid hitting an ASSERT with vfree(NULL)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
and unconditionally defer the vm_size() call, as it doesn't have a NULL
short circuit.
Reported-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
xen/common/vmap.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index b6827b5..9028802 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -268,10 +268,14 @@ void *vzalloc(size_t size)
void vfree(void *va)
{
- unsigned int i, pages = vm_size(va);
+ unsigned int i, pages;
struct page_info *pg;
PAGE_LIST_HEAD(pg_list);
+ if ( !va )
+ return;
+
+ pages = vm_size(va);
ASSERT(pages);
for ( i = 0; i < pages; i++ )
--
1.9.5 (Apple Git-50.3)
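
With the NULL check in place, multi-allocation error paths can clean up unconditionally. A small sketch of that pattern; the two buffers and sizes are made up for illustration:

    uint8_t *a = NULL, *b = NULL;
    int rc = -ENOMEM;

    a = vzalloc(size_a);
    if ( a == NULL )
        goto out;
    b = vzalloc(size_b);
    if ( b == NULL )
        goto out;

    rc = 0;
    /* ... use a and b ... */

 out:
    /* vfree(NULL) is now a harmless no-op, so no per-pointer guards are
     * needed and vm_size(NULL) can no longer trip the ASSERT. */
    vfree(b);
    vfree(a);
    return rc;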

View File

@ -0,0 +1,120 @@
From 055ee44e3cc7c40dc3a3319370d287591771a7f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:32 +0200
Subject: [PATCH 3/8] x86/shadow: fix shadow_track_dirty_vram to work on hvm
guests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Modify shadow_track_dirty_vram to use a local buffer and then flush to the
guest without the paging_lock held. This is modeled after
hap_track_dirty_vram.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/mm/shadow/common.c | 49 +++++++++++++++++++++++++----------------
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index a5eed28..6b91b8c 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3478,7 +3478,7 @@ void shadow_clean_dirty_bitmap(struct domain *d)
int shadow_track_dirty_vram(struct domain *d,
unsigned long begin_pfn,
unsigned long nr,
- XEN_GUEST_HANDLE_64(uint8) dirty_bitmap)
+ XEN_GUEST_HANDLE_64(uint8) guest_dirty_bitmap)
{
int rc;
unsigned long end_pfn = begin_pfn + nr;
@@ -3488,6 +3488,7 @@ int shadow_track_dirty_vram(struct domain *d,
p2m_type_t t;
struct sh_dirty_vram *dirty_vram;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ uint8_t *dirty_bitmap = NULL;
if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
return -EINVAL;
@@ -3516,6 +3517,12 @@ int shadow_track_dirty_vram(struct domain *d,
goto out;
}
+ dirty_bitmap = vzalloc(dirty_size);
+ if ( dirty_bitmap == NULL )
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
/* This should happen seldomly (Video mode change),
* no need to be careful. */
if ( !dirty_vram )
@@ -3546,12 +3553,8 @@ int shadow_track_dirty_vram(struct domain *d,
rc = -ENODATA;
}
else if (dirty_vram->last_dirty == -1)
- {
/* still completely clean, just copy our empty bitmap */
- rc = -EFAULT;
- if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 )
- rc = 0;
- }
+ memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
else
{
unsigned long map_mfn = INVALID_MFN;
@@ -3630,21 +3633,19 @@ int shadow_track_dirty_vram(struct domain *d,
if ( map_sl1p )
sh_unmap_domain_page(map_sl1p);
- rc = -EFAULT;
- if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
- memset(dirty_vram->dirty_bitmap, 0, dirty_size);
- if (dirty_vram->last_dirty + SECONDS(2) < NOW())
+ memcpy(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size);
+ memset(dirty_vram->dirty_bitmap, 0, dirty_size);
+ if ( dirty_vram->last_dirty + SECONDS(2) < NOW() )
+ {
+ /* was clean for more than two seconds, try to disable guest
+ * write access */
+ for ( i = begin_pfn; i < end_pfn; i++ )
{
- /* was clean for more than two seconds, try to disable guest
- * write access */
- for ( i = begin_pfn; i < end_pfn; i++ ) {
- mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
- if (mfn_x(mfn) != INVALID_MFN)
- flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
- }
- dirty_vram->last_dirty = -1;
+ mfn_t mfn = get_gfn_query_unlocked(d, i, &t);
+ if ( mfn_x(mfn) != INVALID_MFN )
+ flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
}
- rc = 0;
+ dirty_vram->last_dirty = -1;
}
}
if ( flush_tlb )
@@ -3659,6 +3660,16 @@ out_dirty_vram:
out:
paging_unlock(d);
+ if ( rc == 0 && dirty_bitmap != NULL &&
+ copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
+ {
+ paging_lock(d);
+ for ( i = 0; i < dirty_size; i++ )
+ dirty_vram->dirty_bitmap[i] |= dirty_bitmap[i];
+ paging_unlock(d);
+ rc = -EFAULT;
+ }
+ vfree(dirty_bitmap);
p2m_unlock(p2m_get_hostp2m(d));
return rc;
}
--
1.9.5 (Apple Git-50.3)
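
The core of the change is a lock-ordering pattern: build the snapshot in a Xen-local buffer while the paging lock is held, and only touch guest memory (which may fault and re-enter the paging code for an HVM caller) once the lock is dropped. A condensed, illustrative sketch of that shape, not the complete function:

    /* Condensed shape of the patched shadow_track_dirty_vram() copy-out path. */
    uint8_t *dirty_bitmap = vzalloc(dirty_size);     /* Xen-local snapshot */
    unsigned int i;
    int rc = 0;

    if ( dirty_bitmap == NULL )
        return -ENOMEM;

    paging_lock(d);
    /* ... walk the shadow tables, filling dirty_bitmap and clearing
     *     dirty_vram->dirty_bitmap, all under the paging lock ... */
    paging_unlock(d);

    /* Guest memory is only touched after the paging lock is dropped. */
    if ( rc == 0 &&
         copy_to_guest(guest_dirty_bitmap, dirty_bitmap, dirty_size) )
    {
        /* The copy failed: merge the snapshot back so no dirty bits are lost. */
        paging_lock(d);
        for ( i = 0; i < dirty_size; i++ )
            dirty_vram->dirty_bitmap[i] |= dirty_bitmap[i];
        paging_unlock(d);
        rc = -EFAULT;
    }
    vfree(dirty_bitmap);
    return rc;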

View File

@ -0,0 +1,44 @@
From 50ca52274eda838f0562938fae3432a0f05f2585 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:32 +0200
Subject: [PATCH 4/8] x86/hap: make hap_track_dirty_vram use non-contiguous
memory for temporary map
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Just like it's done for shadow_track_dirty_vram allocate the temporary
buffer using non-contiguous memory.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/mm/hap/hap.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index abf3d7a..f7b12a8 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -87,7 +87,7 @@ int hap_track_dirty_vram(struct domain *d,
}
rc = -ENOMEM;
- dirty_bitmap = xzalloc_bytes(size);
+ dirty_bitmap = vzalloc(size);
if ( !dirty_bitmap )
goto out;
@@ -168,8 +168,7 @@ int hap_track_dirty_vram(struct domain *d,
p2m_ram_logdirty, p2m_ram_rw);
}
out:
- if ( dirty_bitmap )
- xfree(dirty_bitmap);
+ vfree(dirty_bitmap);
return rc;
}
--
1.9.5 (Apple Git-50.3)

View File

@ -0,0 +1,214 @@
From 63d4838f2f5644060c064f356078a748ca413364 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:32 +0200
Subject: [PATCH 5/8] x86: rework paging_log_dirty_op to work with hvm guests
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
When the caller of paging_log_dirty_op is an hvm guest, Xen would choke when
trying to copy the dirty bitmap to the guest because the paging lock is
already held.
Fix this by independently mapping each page of the guest bitmap as needed
without the paging lock held.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
---
xen/arch/x86/mm/paging.c | 98 +++++++++++++++++++++++++++++++++++++++-----
xen/include/asm-x86/domain.h | 1 +
2 files changed, 88 insertions(+), 11 deletions(-)
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 6b788f7..06dc7fa 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -397,6 +397,51 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
return rv;
}
+static inline void *map_dirty_bitmap(XEN_GUEST_HANDLE_64(uint8) dirty_bitmap,
+ unsigned long pages,
+ struct page_info **page)
+{
+ uint32_t pfec = PFEC_page_present | PFEC_write_access;
+ unsigned long gfn;
+ p2m_type_t p2mt;
+
+ gfn = paging_gva_to_gfn(current,
+ (unsigned long)(dirty_bitmap.p + (pages >> 3)),
+ &pfec);
+ if ( gfn == INVALID_GFN )
+ return NULL;
+
+ *page = get_page_from_gfn(current->domain, gfn, &p2mt, P2M_UNSHARE);
+
+ if ( !p2m_is_ram(p2mt) )
+ {
+ put_page(*page);
+ return NULL;
+ }
+ if ( p2m_is_paging(p2mt) )
+ {
+ put_page(*page);
+ p2m_mem_paging_populate(current->domain, gfn);
+ return NULL;
+ }
+ if ( p2m_is_shared(p2mt) )
+ {
+ put_page(*page);
+ return NULL;
+ }
+
+ return __map_domain_page(*page);
+}
+
+static inline void unmap_dirty_bitmap(void *addr, struct page_info *page)
+{
+ if ( addr != NULL )
+ {
+ unmap_domain_page(addr);
+ put_page(page);
+ }
+}
+
/* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
* clear the bitmap and stats as well. */
@@ -409,9 +454,22 @@ static int paging_log_dirty_op(struct domain *d,
mfn_t *l4 = NULL, *l3 = NULL, *l2 = NULL;
unsigned long *l1 = NULL;
int i4, i3, i2;
+ uint8_t *dirty_bitmap;
+ struct page_info *page;
+ unsigned long index_mapped;
+ again:
if ( !resuming )
domain_pause(d);
+
+ index_mapped = resuming ? d->arch.paging.preempt.log_dirty.done : 0;
+ dirty_bitmap = map_dirty_bitmap(sc->dirty_bitmap, index_mapped, &page);
+ if ( dirty_bitmap == NULL )
+ {
+ domain_unpause(d);
+ return -EFAULT;
+ }
+
paging_lock(d);
if ( !d->arch.paging.preempt.dom )
@@ -451,18 +509,18 @@ static int paging_log_dirty_op(struct domain *d,
l4 = paging_map_log_dirty_bitmap(d);
i4 = d->arch.paging.preempt.log_dirty.i4;
i3 = d->arch.paging.preempt.log_dirty.i3;
+ i2 = d->arch.paging.preempt.log_dirty.i2;
pages = d->arch.paging.preempt.log_dirty.done;
for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
{
l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
- for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+ for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+ i3++, i2 = 0 )
{
l2 = ((l3 && mfn_valid(l3[i3])) ?
map_domain_page(mfn_x(l3[i3])) : NULL);
- for ( i2 = 0;
- (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
- i2++ )
+ for ( ; (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); i2++ )
{
unsigned int bytes = PAGE_SIZE;
l1 = ((l2 && mfn_valid(l2[i2])) ?
@@ -471,15 +529,28 @@ static int paging_log_dirty_op(struct domain *d,
bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
if ( likely(peek) )
{
- if ( (l1 ? copy_to_guest_offset(sc->dirty_bitmap,
- pages >> 3, (uint8_t *)l1,
- bytes)
- : clear_guest_offset(sc->dirty_bitmap,
- pages >> 3, bytes)) != 0 )
+ if ( pages >> (3 + PAGE_SHIFT) !=
+ index_mapped >> (3 + PAGE_SHIFT) )
{
- rv = -EFAULT;
- goto out;
+ /* We need to map next page */
+ d->arch.paging.preempt.log_dirty.i4 = i4;
+ d->arch.paging.preempt.log_dirty.i3 = i3;
+ d->arch.paging.preempt.log_dirty.i2 = i2;
+ d->arch.paging.preempt.log_dirty.done = pages;
+ d->arch.paging.preempt.dom = current->domain;
+ d->arch.paging.preempt.op = sc->op;
+ resuming = 1;
+ paging_unlock(d);
+ unmap_dirty_bitmap(dirty_bitmap, page);
+ goto again;
}
+ ASSERT(((pages >> 3) % PAGE_SIZE) + bytes <= PAGE_SIZE);
+ if ( l1 )
+ memcpy(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), l1,
+ bytes);
+ else
+ memset(dirty_bitmap + ((pages >> 3) % PAGE_SIZE), 0,
+ bytes);
}
pages += bytes << 3;
if ( l1 )
@@ -496,6 +567,7 @@ static int paging_log_dirty_op(struct domain *d,
{
d->arch.paging.preempt.log_dirty.i4 = i4;
d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+ d->arch.paging.preempt.log_dirty.i2 = 0;
rv = -ERESTART;
break;
}
@@ -508,6 +580,7 @@ static int paging_log_dirty_op(struct domain *d,
{
d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
d->arch.paging.preempt.log_dirty.i3 = 0;
+ d->arch.paging.preempt.log_dirty.i2 = 0;
rv = -ERESTART;
}
if ( rv )
@@ -537,6 +610,7 @@ static int paging_log_dirty_op(struct domain *d,
if ( rv )
{
/* Never leave the domain paused on real errors. */
+ unmap_dirty_bitmap(dirty_bitmap, page);
ASSERT(rv == -ERESTART);
return rv;
}
@@ -549,12 +623,14 @@ static int paging_log_dirty_op(struct domain *d,
* paging modes (shadow or hap). Safe because the domain is paused. */
d->arch.paging.log_dirty.clean_dirty_bitmap(d);
}
+ unmap_dirty_bitmap(dirty_bitmap, page);
domain_unpause(d);
return rv;
out:
d->arch.paging.preempt.dom = NULL;
paging_unlock(d);
+ unmap_dirty_bitmap(dirty_bitmap, page);
domain_unpause(d);
if ( l1 )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 6a77a93..63dea9c 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -197,6 +197,7 @@ struct paging_domain {
unsigned long done:PADDR_BITS - PAGE_SHIFT;
unsigned long i4:PAGETABLE_ORDER;
unsigned long i3:PAGETABLE_ORDER;
+ unsigned long i2:PAGETABLE_ORDER;
} log_dirty;
};
} preempt;
--
1.9.5 (Apple Git-50.3)
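
The page-crossing test pages >> (3 + PAGE_SHIFT) != index_mapped >> (3 + PAGE_SHIFT) falls out of simple arithmetic: pages counts bits, so pages >> 3 is the byte offset into the guest's bitmap, and a further >> PAGE_SHIFT selects which page of the bitmap that byte lives in. A worked example with illustrative numbers, assuming the usual 4 KiB pages:

    /* Assuming PAGE_SHIFT == 12 (4 KiB pages). */
    unsigned long pages = 40000;                /* bits processed so far */
    unsigned long byte  = pages >> 3;           /* 5000: byte offset into bitmap */
    unsigned long bpage = byte >> PAGE_SHIFT;   /* 1: second page of the bitmap */

    /* When bpage no longer matches the page derived from index_mapped, the
     * loop position is stashed in d->arch.paging.preempt.log_dirty
     * (i4/i3/i2/done) and the function restarts, mapping the next bitmap
     * page while the paging lock is not held. */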

View File

@ -0,0 +1,33 @@
From bec71ff2b61acc42e71a2bd79ec5cf172130e5f8 Mon Sep 17 00:00:00 2001
From: Roger Pau Monne <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:32 +0200
Subject: [PATCH 6/8] xen/pvh: enable mmu_update hypercall
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This is needed for performing save/restore of PV guests.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Tim Deegan <tim@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7d53c6c..d3fdc3d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4843,6 +4843,7 @@ static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
HYPERCALL(vcpu_op),
HYPERCALL(mmuext_op),
+ HYPERCALL(mmu_update),
HYPERCALL(xsm_op),
HYPERCALL(sched_op),
HYPERCALL(event_channel_op),
--
1.9.5 (Apple Git-50.3)

View File

@ -1,9 +1,10 @@
From 7978429727a9da328444749951005b595de41098 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Mon, 9 Mar 2015 14:01:40 +0100
Subject: [PATCH] iommu: fix usage of shared EPT/IOMMU page tables on PVH guests
From 43f76b1443c8fc3b54dbb9fb466becbb9d584d6d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Fri, 9 Oct 2015 12:57:33 +0200
Subject: [PATCH 7/8] iommu: fix usage of shared EPT/IOMMU page tables on PVH
guests
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
iommu_share_p2m_table should not prevent PVH guests from using a shared page
@ -15,16 +16,16 @@ Also fix another incorrect usage of is_hvm_domain usage in
arch_iommu_populate_page_table. This has not given problems so far because
all the pages in PVH guests are of type PGT_writable_page.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Tested-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Acked-by: Kevin Tian <kevin.tian@intel.com>
---
xen/drivers/passthrough/amd/iommu_map.c | 2 --
xen/drivers/passthrough/iommu.c | 3 ++-
xen/drivers/passthrough/vtd/iommu.c | 2 --
xen/drivers/passthrough/x86/iommu.c | 2 +-
xen/drivers/passthrough/amd/iommu_map.c | 2 --
xen/drivers/passthrough/iommu.c | 3 ++-
xen/drivers/passthrough/vtd/iommu.c | 2 --
xen/drivers/passthrough/x86/iommu.c | 2 +-
4 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
@ -55,10 +56,10 @@ index cc12735..7fcbbb1 100644
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 1063677..48676c5 100644
index 5a946d4..a5a111c 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -1789,8 +1789,6 @@ static void iommu_set_pgd(struct domain *d)
@@ -1802,8 +1802,6 @@ static void iommu_set_pgd(struct domain *d)
struct hvm_iommu *hd = domain_hvm_iommu(d);
mfn_t pgd_mfn;
@ -68,7 +69,7 @@ index 1063677..48676c5 100644
return;
diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
index 52d8948..9eb8d33 100644
index ce0ca5a..3d2c12a 100644
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -56,7 +56,7 @@ int arch_iommu_populate_page_table(struct domain *d)
@ -81,5 +82,5 @@ index 52d8948..9eb8d33 100644
{
BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
--
1.7.2.5
1.9.5 (Apple Git-50.3)

View File

@ -0,0 +1,39 @@
From 403805aca7a4a508cf193d63aa525b3a76bb09dd Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@citrix.com>
Date: Fri, 9 Oct 2015 13:00:35 +0200
Subject: [PATCH 8/8] xen/arm: mm: Do not dump the p2m when mapping a foreign
gfn
The physmap operation XENMAPSPACE_gmfn_foreign dumps the p2m, by calling
dump_p2m_lookup, when an error occurs. But that function is not using a
rate-limited printk.
Any domain able to map a foreign gmfn would be able to flood the Xen
console.
The information wasn't useful, so drop it.
This is XSA-141.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
(cherry picked from commit afc13fe5e21d18c09e44f8ae6f7f4484e9f1de7f)
---
xen/arch/arm/mm.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 7d4ba0c..7d95961 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1103,7 +1103,6 @@ int xenmem_add_to_physmap_one(
page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC);
if ( !page )
{
- dump_p2m_lookup(od, pfn_to_paddr(idx));
rcu_unlock_domain(od);
return -EINVAL;
}
--
1.9.5 (Apple Git-50.3)

View File

@ -1,42 +0,0 @@
From 472dc9e627c8f1b9d7138b142a5b0838550a2072 Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@linaro.org>
Date: Fri, 23 Jan 2015 14:15:07 +0000
Subject: [PATCH] xen/arm: vgic-v2: Don't crash the hypervisor if the SGI
target mode is invalid
The GICv2 spec reserved the value 0b11 for GICD_SGIR.TargetListFilter.
Even if it's an invalid value, a malicious guest could write this value
and therefore crash the hypervisor.
Replace the BUG() by logging the error and inject a data abort to the guest.
This was introduced by commit ea37fd21110b6fbcf9257f814076a243d3873cb7
"xen/arm: split vgic driver into generic and vgic-v2 driver".
This is CVE-2015-0268 / XSA-117.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
xen/arch/arm/vgic-v2.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 598bf06..9dc9a20 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -257,7 +257,10 @@ static int vgic_v2_to_sgi(struct vcpu *v, register_t sgir)
sgi_mode = SGI_TARGET_SELF;
break;
default:
- BUG();
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: unhandled GICD_SGIR write %"PRIregister" with wrong mode\n",
+ v, sgir);
+ return 0;
}
return vgic_to_sgi(v, sgir, sgi_mode, virq, vcpu_mask);
--
2.1.4

View File

@ -1,253 +0,0 @@
From e698f4ab05a710e4463317ea978d426d43107e27 Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@linaro.org>
Date: Mon, 19 Jan 2015 14:01:09 +0000
Subject: [PATCH 1/2] xen/arm: vgic-v3: message in the emulation code should be
rate-limited
printk is not rate-limited by default. Therefore a malicious guest
may be able to flood the Xen console.
If we use gdprintk, unnecessary information will be printed such as the
filename and the line. Instead use XENLOG_G_{ERR,DEBUG} combined with %pv.
Also remove the vGICv3 prefix, which is not necessary, and update some
messages which were wrong.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
xen/arch/arm/vgic-v3.c | 109 +++++++++++++++++++++++++++----------------------
1 file changed, 61 insertions(+), 48 deletions(-)
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index ae4482c..bece189 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -168,13 +168,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
/* Reserved0 */
goto read_as_zero;
default:
- printk("vGICv3: vGICR: read r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: read r%d offset %#08x\n not found",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICv3: vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -244,12 +245,14 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
/* RO */
goto write_ignore;
default:
- printk("vGICR: write r%d offset %#08x\n not found", dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv: vGICR: write r%d offset %#08x\n not found",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -345,15 +348,16 @@ static int __vgic_v3_distr_common_mmio_read(struct vcpu *v, mmio_info_t *info,
vgic_unlock_rank(v, rank, flags);
return 1;
default:
- printk("vGICv3: vGICD/vGICR: unhandled read r%d offset %#08x\n",
- dabt.reg, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "vGICv3: vGICD/vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, reg);
domain_crash_synchronous();
return 0;
@@ -458,15 +462,16 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
vgic_unlock_rank(v, rank, flags);
return 1;
default:
- printk("vGICv3: vGICD/vGICR: unhandled write r%d "
- "=%"PRIregister" offset %#08x\n", dabt.reg, *r, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "vGICv3: vGICD/vGICR: bad write width %d r%d=%"PRIregister" "
- "offset %#08x\n", dabt.size, dabt.reg, *r, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, reg);
domain_crash_synchronous();
return 0;
@@ -521,13 +526,14 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info,
if ( dabt.size != DABT_WORD ) goto bad_width;
return 1;
default:
- printk("vGICv3: vGICR: read r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: read r%d offset %#08x\n not found",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICv3: vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv: vGICR: SGI: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -585,14 +591,16 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info,
/* We do not implement security extensions for guests, write ignore */
goto write_ignore;
default:
- printk("vGICv3: vGICR SGI: write r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: write r%d offset %#08x\n not found",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICR SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -618,9 +626,9 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info)
else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) )
return vgic_v3_rdistr_sgi_mmio_read(v, info, (offset - SZ_64K));
else
- gdprintk(XENLOG_WARNING,
- "vGICv3: vGICR: unknown gpa read address %"PRIpaddr"\n",
- info->gpa);
+ printk(XENLOG_G_WARNING
+ "%pv: vGICR: unknown gpa read address %"PRIpaddr"\n",
+ v, info->gpa);
return 0;
}
@@ -642,9 +650,9 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info)
else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) )
return vgic_v3_rdistr_sgi_mmio_write(v, info, (offset - SZ_64K));
else
- gdprintk(XENLOG_WARNING,
- "vGICV3: vGICR: unknown gpa write address %"PRIpaddr"\n",
- info->gpa);
+ printk(XENLOG_G_WARNING
+ "%pv: vGICR: unknown gpa write address %"PRIpaddr"\n",
+ v, info->gpa);
return 0;
}
@@ -770,18 +778,19 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case 0xf30 ... 0x5fcc:
case 0x8000 ... 0xbfcc:
/* These are reserved register addresses */
- printk("vGICv3: vGICD: read unknown 0x00c .. 0xfcc r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: RAZ on reserved register offset %#08x\n",
+ v, gicd_reg);
goto read_as_zero;
default:
- printk("vGICv3: vGICD: unhandled read r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR, "vGICv3: vGICD: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicd_reg);
domain_crash_synchronous();
return 0;
@@ -840,8 +849,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case 0x020 ... 0x03c:
case 0xc000 ... 0xffcc:
/* Implementation defined -- write ignored */
- printk("vGICv3: vGICD: write unknown 0x020 - 0x03c r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: WI on implementation defined register offset %#08x\n",
+ v, gicd_reg);
goto write_ignore;
case GICD_IGROUPR ... GICD_IGROUPRN:
case GICD_ISENABLER ... GICD_ISENABLERN:
@@ -885,8 +895,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
new_target = new_irouter & MPIDR_AFF0_MASK;
if ( new_target >= v->domain->max_vcpus )
{
- printk("vGICv3: vGICD: wrong irouter at offset %#08x\n val 0x%lx vcpu %x",
- gicd_reg, new_target, v->domain->max_vcpus);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: wrong irouter at offset %#08x\n val 0x%lx vcpu %x",
+ v, gicd_reg, new_target, v->domain->max_vcpus);
vgic_unlock_rank(v, rank, flags);
return 0;
}
@@ -926,19 +937,21 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case 0xf30 ... 0x5fcc:
case 0x8000 ... 0xbfcc:
/* Reserved register addresses */
- printk("vGICv3: vGICD: write unknown 0x00c 0xfcc r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: write unknown 0x00c 0xfcc r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
goto write_ignore;
default:
- printk("vGICv3: vGICD: unhandled write r%d=%"PRIregister" "
- "offset %#08x\n", dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, gicd_reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "VGICv3: vGICD: bad write width %d r%d=%"PRIregister" "
- "offset %#08x\n", dabt.size, dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicd_reg);
domain_crash_synchronous();
return 0;
--
2.1.4

View File

@ -1,115 +0,0 @@
From e8fa469595e29b2dbe6dde3a77ee2ea2d9e93283 Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@linaro.org>
Date: Mon, 19 Jan 2015 12:59:42 +0000
Subject: [PATCH 2/2] xen/arm: vgic-v2: message in the emulation code should be
rate-limited
printk is not rate-limited by default. Therefore a malicious guest may
be able to flood the Xen console.
If we use gdprintk, unnecessary information will be printed such as the
filename and the line. Instead use XENLOG_G_ERR combined with %pv.
Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
xen/arch/arm/vgic-v2.c | 40 +++++++++++++++++++++++-----------------
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 9dc9a20..3b87f54 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -198,7 +198,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case GICD_ICPIDR2:
if ( dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled read from ICPIDR2\n");
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read from ICPIDR2\n", v);
return 0;
/* Implementation defined -- read as zero */
@@ -215,14 +215,14 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
goto read_as_zero;
default:
- printk("vGICD: unhandled read r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
return 0;
}
bad_width:
- printk("vGICD: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicd_reg);
domain_crash_synchronous();
return 0;
@@ -331,14 +331,16 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_ISPENDR ... GICD_ISPENDRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
return 0;
case GICD_ICPENDR ... GICD_ICPENDRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
return 0;
case GICD_ISACTIVER ... GICD_ISACTIVERN:
@@ -457,14 +459,16 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
return 0;
case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
return 0;
/* Implementation defined -- write ignored */
@@ -489,14 +493,16 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
goto write_ignore;
default:
- printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
- dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, gicd_reg);
return 0;
}
bad_width:
- printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicd_reg);
domain_crash_synchronous();
return 0;
--
2.1.4

View File

@ -1,51 +0,0 @@
x86/HVM: return all ones on wrong-sized reads of system device I/O ports
So far the value presented to the guest remained uninitialized.
This is CVE-2015-2044 / XSA-121.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -486,6 +486,7 @@ static int handle_pit_io(
if ( bytes != 1 )
{
gdprintk(XENLOG_WARNING, "PIT bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -213,6 +213,7 @@ static int handle_pmt_io(
if ( bytes != 4 )
{
gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -703,7 +703,8 @@ static int handle_rtc_io(
if ( bytes != 1 )
{
- gdprintk(XENLOG_WARNING, "HVM_RTC bas access\n");
+ gdprintk(XENLOG_WARNING, "HVM_RTC bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -331,6 +331,7 @@ static int vpic_intercept_pic_io(
if ( bytes != 1 )
{
gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
+ *val = ~0;
return X86EMUL_OKAY;
}

View File

@ -1,40 +0,0 @@
pre-fill structures for certain HYPERVISOR_xen_version sub-ops
... avoiding passing hypervisor stack contents back to the caller
through space unused by the respective strings.
This is CVE-2015-2045 / XSA-122.
Signed-off-by: Aaron Adams <Aaron.Adams@nccgroup.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -240,6 +240,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
case XENVER_extraversion:
{
xen_extraversion_t extraversion;
+
+ memset(extraversion, 0, sizeof(extraversion));
safe_strcpy(extraversion, xen_extra_version());
if ( copy_to_guest(arg, extraversion, ARRAY_SIZE(extraversion)) )
return -EFAULT;
@@ -249,6 +251,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
case XENVER_compile_info:
{
struct xen_compile_info info;
+
+ memset(&info, 0, sizeof(info));
safe_strcpy(info.compiler, xen_compiler());
safe_strcpy(info.compile_by, xen_compile_by());
safe_strcpy(info.compile_domain, xen_compile_domain());
@@ -284,6 +288,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
case XENVER_changeset:
{
xen_changeset_info_t chgset;
+
+ memset(chgset, 0, sizeof(chgset));
safe_strcpy(chgset, xen_changeset());
if ( copy_to_guest(arg, chgset, ARRAY_SIZE(chgset)) )
return -EFAULT;

View File

@ -1,24 +0,0 @@
x86emul: fully ignore segment override for register-only operations
For ModRM encoded instructions with register operands we must not
overwrite ea.mem.seg (if a - bogus in that case - segment override was
present) as it aliases with ea.reg.
This is CVE-2015-2151 / XSA-123.
Reported-by: Felix Wilhelm <fwilhelm@ernw.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Keir Fraser <keir@xen.org>
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1757,7 +1757,7 @@ x86_emulate(
}
}
- if ( override_seg != -1 )
+ if ( override_seg != -1 && ea.type == OP_MEM )
ea.mem.seg = override_seg;
/* Early operand adjustments. */

View File

@ -1,71 +0,0 @@
From 98670acc98cad5aee0e0714694a64d3b96675c36 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Wed, 19 Nov 2014 12:57:11 -0500
Subject: [PATCH] Limit XEN_DOMCTL_memory_mapping hypercall to only process up
to 64 GFNs (or less)
Said hypercall for large BARs can take quite a while. As such
we can require that the hypercall MUST break up the request
in smaller values.
Another approach is to add preemption to it - whether we do the
preemption using hypercall_create_continuation or returning
EAGAIN to userspace (and have it re-invocate the call) - either
way the issue we cannot easily solve is that in 'map_mmio_regions'
if we encounter an error we MUST call 'unmap_mmio_regions' for the
whole BAR region.
Since the preemption would re-use input fields such as nr_mfns,
first_gfn, first_mfn - we would lose the original values -
and only undo what was done in the current round (i.e. ignoring
anything that was done prior to earlier preemptions).
Unless we re-used the return value as 'EAGAIN|nr_mfns_done<<10' but
that puts a limit (since the return value is a long) on the amount
of nr_mfns that can be provided.
This patch sidesteps this problem by:
- Setting a hard limit of nr_mfns having to be 64 or less.
- Toolstack adjusts correspondingly to the nr_mfn limit.
- If there is an error when adding, the toolstack will call the
remove operation to remove the whole region.
The need to break this hypercall down is that mapping large BARs can take
more than the guest's (usually the initial domain's) time-slice. This has
the negative result in that the guest is locked out for a long
duration and is unable to act on any pending events.
We also augment the code to return zero if nr_mfns is zero, instead
of trying the hypercall.
Suggested-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
[v50: Simplify loop]
[v51: If max_batch_sz 1 (or less) we would return zero. Fix that]
[v52: Handle nr_mfns being zero]
[v53: Fix up return value]
---
tools/libxc/xc_domain.c | 46 +++++++++++++++++++++++++++++++++++++++++----
xen/common/domctl.c | 5 +++++
xen/include/public/domctl.h | 1 +
3 files changed, 48 insertions(+), 4 deletions(-)
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index d396cc4..c2e60a7 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -1027,6 +1027,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
(gfn + nr_mfns - 1) < gfn ) /* wrap? */
break;
+ ret = -E2BIG;
+ /* Must break hypercall up as this could take a while. */
+ if ( nr_mfns > 64 )
+ break;
+
ret = -EPERM;
if ( !iomem_access_permitted(current->domain, mfn, mfn_end) ||
!iomem_access_permitted(d, mfn, mfn_end) )
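
The hypervisor side above simply refuses requests larger than 64 GFNs; the toolstack half of the change (the tools/libxc/xc_domain.c hunk is not reproduced in this diff) has to loop in batches and undo the whole region on failure. A hedged sketch of that shape only — do_memory_mapping_domctl() is a made-up stand-in for the underlying domctl invocation, not a real libxc function:

    #define MAX_BATCH 64   /* matches the new hypervisor-side limit */

    /* Illustrative batching loop -- not the actual libxc code. */
    static int map_mmio_in_batches(uint32_t dom, unsigned long first_gfn,
                                   unsigned long first_mfn, unsigned long nr_mfns)
    {
        unsigned long done = 0;
        int rc = 0;

        while ( done < nr_mfns )
        {
            unsigned long todo = nr_mfns - done;

            if ( todo > MAX_BATCH )
                todo = MAX_BATCH;

            rc = do_memory_mapping_domctl(dom, first_gfn + done,
                                          first_mfn + done, todo, 1 /* add */);
            if ( rc )
            {
                /* On error the whole BAR range must be unmapped, not just
                 * the batches completed so far. */
                do_memory_mapping_domctl(dom, first_gfn, first_mfn, nr_mfns,
                                         0 /* remove */);
                return rc;
            }
            done += todo;
        }

        return 0;
    }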

View File

@ -1,50 +0,0 @@
domctl: don't allow a toolstack domain to call domain_pause() on itself
These DOMCTL subops were accidentally declared safe for disaggregation
in the wake of XSA-77.
This is XSA-127.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -888,6 +888,10 @@ long arch_do_domctl(
{
xen_guest_tsc_info_t info;
+ ret = -EINVAL;
+ if ( d == current->domain ) /* no domain_pause() */
+ break;
+
domain_pause(d);
tsc_get_info(d, &info.tsc_mode,
&info.elapsed_nsec,
@@ -903,6 +907,10 @@ long arch_do_domctl(
case XEN_DOMCTL_settscinfo:
{
+ ret = -EINVAL;
+ if ( d == current->domain ) /* no domain_pause() */
+ break;
+
domain_pause(d);
tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
domctl->u.tsc_info.info.elapsed_nsec,
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -522,8 +522,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_DOMCTL_resumedomain:
{
- domain_resume(d);
- ret = 0;
+ if ( d == current->domain ) /* no domain_pause() */
+ ret = -EINVAL;
+ else
+ domain_resume(d);
}
break;

View File

@ -1,29 +0,0 @@
domctl/sysctl: don't leak hypervisor stack to toolstacks
This is XSA-132.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -884,7 +884,7 @@ long arch_do_domctl(
case XEN_DOMCTL_gettscinfo:
{
- xen_guest_tsc_info_t info;
+ xen_guest_tsc_info_t info = { 0 };
ret = -EINVAL;
if ( d == current->domain ) /* no domain_pause() */
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -76,7 +76,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_getdomaininfolist:
{
struct domain *d;
- struct xen_domctl_getdomaininfo info;
+ struct xen_domctl_getdomaininfo info = { 0 };
u32 num_domains = 0;
rcu_read_lock(&domlist_read_lock);

View File

@ -1,23 +0,0 @@
From: Jan Beulich <jbeulich@suse.com>
Subject: gnttab: add missing version check to GNTTABOP_swap_grant_ref handling
... avoiding NULL derefs when the version to use wasn't set yet (via
GNTTABOP_setup_table or GNTTABOP_set_version).
This is XSA-134.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2592,6 +2592,9 @@ __gnttab_swap_grant_ref(grant_ref_t ref_
spin_lock(&gt->lock);
+ if ( gt->gt_version == 0 )
+ PIN_FAIL(out, GNTST_general_error, "grant table not yet set up\n");
+
/* Bounds check on the grant refs */
if ( unlikely(ref_a >= nr_grant_entries(d->grant_table)))
PIN_FAIL(out, GNTST_bad_gntref, "Bad ref-a (%d).\n", ref_a);

View File

@ -1,19 +0,0 @@
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/traps: loop in the correct direction in compat_iret()
This is XSA-136.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ b/xen/arch/x86/x86_64/compat/traps.c
@@ -119,7 +119,7 @@ unsigned int compat_iret(void)
}
else if ( ksp > regs->_esp )
{
- for (i = 9; i > 0; ++i)
+ for ( i = 9; i > 0; --i )
{
rc |= __get_user(x, (u32 *)regs->rsp + i);
rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);

View File

@ -2,6 +2,7 @@
PORTNAME= xen
PORTVERSION= 4.5.1
PORTREVISION= 1
CATEGORIES= sysutils emulators
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${PORTVERSION}/ \
http://code.coreboot.org/p/seabios/downloads/get/:seabios
@ -48,6 +49,7 @@ QEMU_ARGS= --disable-gtk \
--cxx=c++
EXTRA_PATCHES= ${FILESDIR}/xsa137.patch:-p1 \
${FILESDIR}/xsa142-4.5.patch:-p1 \
${FILESDIR}/0002-libxc-fix-xc_dom_load_elf_symtab.patch:-p1
CONFIGURE_ARGS+= --with-extra-qemuu-configure-args="${QEMU_ARGS}"
@ -75,10 +77,6 @@ post-patch:
${WRKSRC}/tools/libxl/libxl_dm.c \
${WRKSRC}/tools/qemu-xen-traditional/i386-dm/helper2.c \
${WRKSRC}/docs/man/*
@for p in ${FILESDIR}/*qemut*.patch; do \
${ECHO_CMD} "====> Applying $${p##*/}" ; \
${PATCH} -s -p1 -i $${p} -d ${WRKSRC}/tools/qemu-xen-traditional ; \
done
@for p in ${FILESDIR}/*qemuu*.patch; do \
${ECHO_CMD} "====> Applying $${p##*/}" ; \
${PATCH} -s -p1 -i $${p} -d ${WRKSRC}/tools/qemu-xen ; \

View File

@ -1,92 +0,0 @@
pcnet: fix Negative array index read
From: Gonglei <arei.gonglei@huawei.com>
s->xmit_pos may be assigned a negative value (-1),
but this branch uses s->xmit_pos as an index into
the array s->buffer. Let's add a check for s->xmit_pos.
upstream-commit-id: 7b50d00911ddd6d56a766ac5671e47304c20a21b
Signed-off-by: Gonglei <arei.gonglei@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
diff --git a/hw/pcnet.c b/hw/pcnet.c
index 7cc0637..9f3e1cc 100644
--- a/hw/pcnet.c
+++ b/hw/pcnet.c
@@ -1250,7 +1250,7 @@ static void pcnet_transmit(PCNetState *s)
target_phys_addr_t xmit_cxda = 0;
int count = CSR_XMTRL(s)-1;
int add_crc = 0;
-
+ int bcnt;
s->xmit_pos = -1;
if (!CSR_TXON(s)) {
@@ -1276,34 +1276,39 @@ static void pcnet_transmit(PCNetState *s)
if (BCR_SWSTYLE(s) != 1)
add_crc = GET_FIELD(tmd.status, TMDS, ADDFCS);
}
+
+ if (s->xmit_pos < 0) {
+ goto txdone;
+ }
+
+ bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+ s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
+ s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
+ s->xmit_pos += bcnt;
+
if (!GET_FIELD(tmd.status, TMDS, ENP)) {
- int bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
- s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
- s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
- s->xmit_pos += bcnt;
- } else if (s->xmit_pos >= 0) {
- int bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
- s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
- s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
- s->xmit_pos += bcnt;
+ goto txdone;
+ }
#ifdef PCNET_DEBUG
- printf("pcnet_transmit size=%d\n", s->xmit_pos);
+ printf("pcnet_transmit size=%d\n", s->xmit_pos);
#endif
- if (CSR_LOOP(s)) {
- if (BCR_SWSTYLE(s) == 1)
- add_crc = !GET_FIELD(tmd.status, TMDS, NOFCS);
- s->looptest = add_crc ? PCNET_LOOPTEST_CRC : PCNET_LOOPTEST_NOCRC;
- pcnet_receive(s, s->buffer, s->xmit_pos);
- s->looptest = 0;
- } else
- if (s->vc)
- qemu_send_packet(s->vc, s->buffer, s->xmit_pos);
-
- s->csr[0] &= ~0x0008; /* clear TDMD */
- s->csr[4] |= 0x0004; /* set TXSTRT */
- s->xmit_pos = -1;
+ if (CSR_LOOP(s)) {
+ if (BCR_SWSTYLE(s) == 1)
+ add_crc = !GET_FIELD(tmd.status, TMDS, NOFCS);
+ s->looptest = add_crc ? PCNET_LOOPTEST_CRC : PCNET_LOOPTEST_NOCRC;
+ pcnet_receive(s, s->buffer, s->xmit_pos);
+ s->looptest = 0;
+ } else {
+ if (s->vc) {
+ qemu_send_packet(s->vc, s->buffer, s->xmit_pos);
+ }
}
+ s->csr[0] &= ~0x0008; /* clear TDMD */
+ s->csr[4] |= 0x0004; /* set TXSTRT */
+ s->xmit_pos = -1;
+
+ txdone:
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, PHYSADDR(s,CSR_CXDA(s)));
if (!CSR_TOKINTD(s) || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT)))

View File

@ -1,45 +0,0 @@
From 2630672ab22255de252f877709851c0557a1c647 Mon Sep 17 00:00:00 2001
From: Petr Matousek <pmatouse@redhat.com>
Date: Sun, 24 May 2015 10:53:44 +0200
Subject: [PATCH] pcnet: force the buffer access to be in bounds during tx
4096 is the maximum length per TMD and it is also currently the size of
the relay buffer the pcnet driver uses for sending the packet data to QEMU
for further processing. With a packet spanning multiple TMDs it can
happen that the overall packet size will be bigger than sizeof(buffer),
which results in memory corruption.
Fix this by only allowing at most sizeof(buffer) bytes to be queued.
This is CVE-2015-3209.
Signed-off-by: Petr Matousek <pmatouse@redhat.com>
Reported-by: Matt Tait <matttait@google.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
hw/pcnet.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/hw/pcnet.c b/hw/pcnet.c
index bdfd38f..6d32e4c 100644
--- a/hw/pcnet.c
+++ b/hw/pcnet.c
@@ -1241,6 +1241,14 @@ static void pcnet_transmit(PCNetState *s)
}
bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+
+ /* if multi-tmd packet outsizes s->buffer then skip it silently.
+ Note: this is not what real hw does */
+ if (s->xmit_pos + bcnt > sizeof(s->buffer)) {
+ s->xmit_pos = -1;
+ goto txdone;
+ }
+
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
--
2.1.0

View File

@ -1,77 +0,0 @@
From 510952d4c33ee69574167ce30829b21c815a165b Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 3 Jun 2015 14:13:31 +0200
Subject: [PATCH 1/2] ide: Check array bounds before writing to io_buffer
(CVE-2015-5154)
If the end_transfer_func of a command is called because enough data has
been read or written for the current PIO transfer, and it fails to
correctly call the command completion functions, the DRQ bit in the
status register and s->end_transfer_func may remain set. This allows the
guest to access further bytes in s->io_buffer beyond s->data_end, and
eventually overflowing the io_buffer.
One case where this currently happens is emulation of the ATAPI command
START STOP UNIT.
This patch fixes the problem by adding explicit array bounds checks
before accessing the buffer instead of relying on end_transfer_func to
function correctly.
Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/ide.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/hw/ide.c b/hw/ide.c
index 791666b..211ec88 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -3002,6 +3002,10 @@ static void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
buffered_pio_write(s, addr, 2);
p = s->data_ptr;
+ if (p + 2 > s->data_end) {
+ return;
+ }
+
*(uint16_t *)p = le16_to_cpu(val);
p += 2;
s->data_ptr = p;
@@ -3021,6 +3025,10 @@ static uint32_t ide_data_readw(void *opaque, uint32_t addr)
buffered_pio_read(s, addr, 2);
p = s->data_ptr;
+ if (p + 2 > s->data_end) {
+ return 0;
+ }
+
ret = cpu_to_le16(*(uint16_t *)p);
p += 2;
s->data_ptr = p;
@@ -3040,6 +3048,10 @@ static void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
buffered_pio_write(s, addr, 4);
p = s->data_ptr;
+ if (p + 4 > s->data_end) {
+ return;
+ }
+
*(uint32_t *)p = le32_to_cpu(val);
p += 4;
s->data_ptr = p;
@@ -3059,6 +3071,10 @@ static uint32_t ide_data_readl(void *opaque, uint32_t addr)
buffered_pio_read(s, addr, 4);
p = s->data_ptr;
+ if (p + 4 > s->data_end) {
+ return 0;
+ }
+
ret = cpu_to_le32(*(uint32_t *)p);
p += 4;
s->data_ptr = p;
--
2.1.4

View File

@ -1,71 +0,0 @@
From 1ac0f60d558b7fca55c69a61ab4c4538af1f02f9 Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 3 Jun 2015 14:41:27 +0200
Subject: [PATCH 2/2] ide: Clear DRQ after handling all expected accesses
This is additional hardening against an end_transfer_func that fails to
clear the DRQ status bit. The bit must be unset as soon as the PIO
transfer has completed, so it's better to do this in a central place
instead of duplicating the code in all commands (and forgetting it in
some).
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/ide.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/hw/ide.c b/hw/ide.c
index 211ec88..7b84d1b 100644
--- a/hw/ide.c
+++ b/hw/ide.c
@@ -3009,8 +3009,10 @@ static void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
*(uint16_t *)p = le16_to_cpu(val);
p += 2;
s->data_ptr = p;
- if (p >= s->data_end)
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
+ }
}
static uint32_t ide_data_readw(void *opaque, uint32_t addr)
@@ -3032,8 +3034,10 @@ static uint32_t ide_data_readw(void *opaque, uint32_t addr)
ret = cpu_to_le16(*(uint16_t *)p);
p += 2;
s->data_ptr = p;
- if (p >= s->data_end)
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
+ }
return ret;
}
@@ -3055,8 +3059,10 @@ static void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
*(uint32_t *)p = le32_to_cpu(val);
p += 4;
s->data_ptr = p;
- if (p >= s->data_end)
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
+ }
}
static uint32_t ide_data_readl(void *opaque, uint32_t addr)
@@ -3078,8 +3084,10 @@ static uint32_t ide_data_readl(void *opaque, uint32_t addr)
ret = cpu_to_le32(*(uint32_t *)p);
p += 4;
s->data_ptr = p;
- if (p >= s->data_end)
+ if (p >= s->data_end) {
+ s->status &= ~DRQ_STAT;
s->end_transfer_func(s);
+ }
return ret;
}
--
2.1.4

View File

@ -0,0 +1,53 @@
From 07ca00703f76ad392eda5ee52cce1197cf49c30a Mon Sep 17 00:00:00 2001
From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH v2.1 for-4.5] libxl: handle read-only drives with qemu-xen
The current libxl code doesn't deal with read-only drives at all.
Upstream QEMU and qemu-xen only support read-only cdrom drives: make
sure to specify "readonly=on" for cdrom drives and return error in case
the user requested a non-cdrom read-only drive.
This is XSA-142, discovered by Lin Liu
(https://bugzilla.redhat.com/show_bug.cgi?id=1257893).
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Backport to Xen 4.5 and earlier, apropos of report and review from
Michael Young.
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
tools/libxl/libxl_dm.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
index b4ce523..d74fb14 100644
--- a/tools/libxl/libxl_dm.c
+++ b/tools/libxl/libxl_dm.c
@@ -797,13 +797,18 @@ static char ** libxl__build_device_model_args_new(libxl__gc *gc,
if (disks[i].is_cdrom) {
if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY)
drive = libxl__sprintf
- (gc, "if=ide,index=%d,media=cdrom,cache=writeback,id=ide-%i",
- disk, dev_number);
+ (gc, "if=ide,index=%d,readonly=%s,media=cdrom,cache=writeback,id=ide-%i",
+ disk, disks[i].readwrite ? "off" : "on", dev_number);
else
drive = libxl__sprintf
- (gc, "file=%s,if=ide,index=%d,media=cdrom,format=%s,cache=writeback,id=ide-%i",
- disks[i].pdev_path, disk, format, dev_number);
+ (gc, "file=%s,if=ide,index=%d,readonly=%s,media=cdrom,format=%s,cache=writeback,id=ide-%i",
+ disks[i].pdev_path, disk, disks[i].readwrite ? "off" : "on", format, dev_number);
} else {
+ if (!disks[i].readwrite) {
+ LIBXL__LOG(ctx, LIBXL__LOG_ERROR, "qemu-xen doesn't support read-only disk drivers");
+ return NULL;
+ }
+
if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY) {
LIBXL__LOG(ctx, LIBXL__LOG_WARNING, "cannot support"
" empty disk format for %s", disks[i].vdev);
--
1.7.10.4
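
For concreteness, the kind of -drive arguments the patched libxl__build_device_model_args_new() emits for cdrom devices; the index, id and file values below are invented for illustration:

    /* Hypothetical resulting drive strings (index/id/file values are made up). */
    const char *empty_cdrom =
        "if=ide,index=1,readonly=on,media=cdrom,cache=writeback,id=ide-832";
    const char *iso_cdrom =
        "file=/var/isos/guest.iso,if=ide,index=1,readonly=on,media=cdrom,"
        "format=raw,cache=writeback,id=ide-832";
    /* A read-only request for a non-cdrom disk now fails argument construction
     * with the "qemu-xen doesn't support read-only disk drivers" error instead
     * of being silently treated as writable. */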