Import of pkgsrc-2016Q3

This commit is contained in:
2016-10-14 07:49:11 +02:00
committed by Lionel Sambuc
parent 9d819b6d54
commit 1242aa1e36
35952 changed files with 949749 additions and 377083 deletions

View File

@@ -1,9 +1,9 @@
# $NetBSD: Makefile,v 1.45 2015/04/19 13:13:20 spz Exp $
# $NetBSD: Makefile,v 1.51 2016/09/08 15:41:01 bouyer Exp $
VERSION= 4.1.6.1
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel41-${VERSION}
PKGREVISION= 16
PKGREVISION= 20
CATEGORIES= sysutils
MASTER_SITES= http://bits.xensource.com/oss-xen/release/${VERSION}/
@@ -21,7 +21,7 @@ USE_TOOLS+= gmake
PYTHON_FOR_BUILD_ONLY= yes
PY_PATCHPLIST= NO
PYTHON_VERSIONS_INCOMPATIBLE= 33 34 # not yet ported as of 4.1.6.1
PYTHON_VERSIONS_INCOMPATIBLE= 34 35 # not yet ported as of 4.1.6.1
# XXX Why does this not work?
# See work/xen-4.1.2/Config.mk:41 why PYTHON must be set and what for

View File

@@ -1,7 +1,8 @@
$NetBSD: distinfo,v 1.37 2015/09/14 13:36:29 joerg Exp $
$NetBSD: distinfo,v 1.44 2016/09/08 15:41:01 bouyer Exp $
SHA1 (xen-4.1.6.1.tar.gz) = e5f15feb0821578817a65ede16110c6eac01abd0
RMD160 (xen-4.1.6.1.tar.gz) = bff11421fc44a26f2cc3156713267abcb36d7a19
SHA512 (xen-4.1.6.1.tar.gz) = 5f6106514ffb57708009e3d6763824b13d9038699048d1a91fa09ad223e0391b92b6ea0f25714a0bbf8ac8373c58fc7871ca0bce9c3ff7873d41fb2eeae13ed8
Size (xen-4.1.6.1.tar.gz) = 10428485 bytes
SHA1 (patch-CVE-2013-1442) = 7aa43513ea7cddc50b4e6802412cfc2903cce8e1
SHA1 (patch-CVE-2013-4355_1) = 56dde995d7df4f18576040007fd5532de61d9069
@@ -15,7 +16,7 @@ SHA1 (patch-CVE-2013-4553) = 6708dcef1737b119a3fcf2e3414c22c115cbacc1
SHA1 (patch-CVE-2013-6885_1) = 18d155b2c76119988be32cfd43e3c4aa6a507b9d
SHA1 (patch-CVE-2013-6885_2) = be3c99ba3e349492d45cd4f2fce0acc26ac1a96d
SHA1 (patch-CVE-2014-1666) = acf27080799d4aae6a03b556caadb01081d5314e
SHA1 (patch-CVE-2014-3124) = 59a48eed88abcda5de2fc7e398451a492e5d2145
SHA1 (patch-CVE-2014-3124) = 0643b9b2b4bb3a976f59ec081e25f2b466e4fdba
SHA1 (patch-CVE-2014-4021) = ee8ee800b35f7eaa242b06536c1ffa6568305b36
SHA1 (patch-CVE-2014-7154) = 5f0541559d911778aa5267bb5c0e1e8a9a3904e2
SHA1 (patch-CVE-2014-7155) = 0f1aa6a5d4fdb8403fc1e01b884491a63de501f8
@@ -30,11 +31,21 @@ SHA1 (patch-CVE-2015-2045) = e1874bbde0cce7db4ee9260440f5280d404027d7
SHA1 (patch-CVE-2015-2151) = aed92f50d162febc3074f7edecaf6ca418d0b42c
SHA1 (patch-CVE-2015-2752) = 37f44989a3b3c69dea8e9de9fc34ffd5c2e8b087
SHA1 (patch-CVE-2015-2756) = b3b133d42229ecc8c308644b17e5317cd77f9a98
SHA1 (patch-CVE-2015-7835) = d66fe84abfb921bf435c1ed9b077012937d0c71e
SHA1 (patch-CVE-2015-7969) = 4eb96025afae4be547f74b9e71a7d8a3a37fc60b
SHA1 (patch-CVE-2015-7971) = 0d0d36ad99f313afb96111a832eb65ddeaf8010e
SHA1 (patch-CVE-2015-8339) = e5485ab9e73fa9a63c566505b8de805530ac678e
SHA1 (patch-Config.mk) = a43ed1b3304d6383dc093acd128a7f373d0ca266
SHA1 (patch-XSA-166) = 24fccf8e30ccf910a128e5e0365800191a90524c
SHA1 (patch-XSA-182) = 70a7a6175a4b87ffaf72cbc5a3932f076efa3f9c
SHA1 (patch-XSA-185) = a2313922aa4dad734b96c80f64fe54eca3c14019
SHA1 (patch-XSA-187-1) = 55ea0c2d9c7d8d9476a5ab97342ff552be4faf56
SHA1 (patch-XSA-187-2) = e21b24771fa9417f593b8f6d1550660bbad36b98
SHA1 (patch-xen_Makefile) = d1c7e4860221f93d90818f45a77748882486f92b
SHA1 (patch-xen_arch_x86_Rules.mk) = 6b9b4bfa28924f7d3f6c793a389f1a7ac9d228e2
SHA1 (patch-xen_arch_x86_cpu_mcheck_vmce.c) = 5afd01780a13654f1d21bf1562f6431c8370be0b
SHA1 (patch-xen_arch_x86_time.c) = 1611959c08ad79e3f042ac70c8d9d57b60225289
SHA1 (patch-xen_arch_x86_time.c) = 2c69ac1cb5e0ca06c4f70acb91d2723a32ce98a9
SHA1 (patch-xen_arch_x86_x86__64_entry.S) = 92bea7885c418e643bd9697abb9655bee9d1750b
SHA1 (patch-xen_drivers_char_console_c) = 0fe186369602ccffaeec6f4bfbee8bb4298d3ff0
SHA1 (patch-xen_drivers_passthrough_vtd_x86_ats.c) = 012ccbb27069c4f2e0361bd127397fdd22027f29
SHA1 (patch-xen_include_xen_stdarg.h) = e9df974a9b783ed442ab17497198432cb9844b70

View File

@@ -1,3 +1,5 @@
$NetBSD: patch-CVE-2014-3124,v 1.2 2015/12/29 04:04:32 dholland Exp $
--- xen/include/asm-x86/p2m.h.orig 2013-09-10 06:42:18.000000000 +0000
+++ xen/include/asm-x86/p2m.h 2014-04-30 13:11:30.000000000 +0000
@@ -134,6 +134,13 @@ typedef enum {

View File

@@ -0,0 +1,45 @@
$NetBSD: patch-CVE-2015-7835,v 1.1 2015/10/29 20:29:56 bouyer Exp $
Patch for CVE-2015-7835 aka XSA-148 based on
http://xenbits.xenproject.org/xsa/xsa148-4.4.patch
--- xen/include/asm-x86/x86_32/page.h.orig 2015-10-29 20:35:24.000000000 +0100
+++ xen/include/asm-x86/x86_32/page.h 2015-10-29 20:38:02.000000000 +0100
@@ -130,7 +130,9 @@
#define BASE_DISALLOW_MASK (0xFFFFF198U & ~_PAGE_NX)
#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
-#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK & ~_PAGE_PSE)
+#define L2_DISALLOW_MASK (unlikely(opt_allow_superpage) \
+ ? BASE_DISALLOW_MASK & ~_PAGE_PSE \
+ : BASE_DISALLOW_MASK )
#define L3_DISALLOW_MASK 0xFFFFF1FEU /* must-be-zero */
#endif /* __X86_32_PAGE_H__ */
--- xen/include/asm-x86/x86_64/page.h.orig 2015-10-29 20:35:36.000000000 +0100
+++ xen/include/asm-x86/x86_64/page.h 2015-10-29 20:37:33.000000000 +0100
@@ -167,7 +167,9 @@
#define BASE_DISALLOW_MASK (0xFF800198U & ~_PAGE_NX)
#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
-#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK & ~_PAGE_PSE)
+#define L2_DISALLOW_MASK (unlikely(opt_allow_superpage) \
+ ? BASE_DISALLOW_MASK & ~_PAGE_PSE \
+ : BASE_DISALLOW_MASK )
#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)
--- xen/arch/x86/mm.c.orig 2015-10-29 20:30:55.000000000 +0100
+++ xen/arch/x86/mm.c 2015-10-29 20:32:56.000000000 +0100
@@ -1898,7 +1898,10 @@
}
/* Fast path for identical mapping and presence. */
- if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
+ if ( !l2e_has_changed(ol2e, nl2e,
+ unlikely(opt_allow_superpage)
+ ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
+ : _PAGE_PRESENT) )
{
adjust_guest_l2e(nl2e, d);
rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad);

View File

@@ -0,0 +1,35 @@
$NetBSD: patch-CVE-2015-7969,v 1.1 2015/10/29 20:29:56 bouyer Exp $
Patch for CVE-2015-7969 aka XSA-149 + XSA-151 based on
http://xenbits.xenproject.org/xsa/xsa149.patch
http://xenbits.xenproject.org/xsa/xsa151.patch
--- xen/common/domain.c.orig 2013-09-10 08:42:18.000000000 +0200
+++ xen/common/domain.c 2015-10-29 20:44:06.000000000 +0100
@@ -671,6 +671,7 @@
xfree(d->pirq_to_evtchn);
xsm_free_security_domain(d);
+ xfree(d->vcpu);
free_domain_struct(d);
send_guest_global_virq(dom0, VIRQ_DOM_EXC);
--- xen/common/xenoprof.c.orig
+++ xen/common/xenoprof.c
@@ -239,6 +239,7 @@ static int alloc_xenoprof_struct(
d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
if ( d->xenoprof->rawbuf == NULL )
{
+ xfree(d->xenoprof->vcpu);
xfree(d->xenoprof);
d->xenoprof = NULL;
return -ENOMEM;
@@ -286,6 +287,7 @@ void free_xenoprof_pages(struct domain *
free_xenheap_pages(x->rawbuf, order);
}
+ xfree(x->vcpu);
xfree(x);
d->xenoprof = NULL;
}

View File

@@ -0,0 +1,35 @@
$NetBSD: patch-CVE-2015-7971,v 1.1 2015/10/29 20:29:56 bouyer Exp $
Patch for CVE-2015-7971 aka XSA-152, based on
http://xenbits.xenproject.org/xsa/xsa152.patch
--- xen/common/xenoprof.c.orig
+++ xen/common/xenoprof.c
@@ -676,15 +676,13 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
if ( (op < 0) || (op > XENOPROF_last_op) )
{
- printk("xenoprof: invalid operation %d for domain %d\n",
- op, current->domain->domain_id);
+ gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op);
return -EINVAL;
}
if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
{
- printk("xenoprof: dom %d denied privileged operation %d\n",
- current->domain->domain_id, op);
+ gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op);
return -EPERM;
}
@@ -907,8 +905,7 @@ ret_t do_xenoprof_op(int op, XEN_GUEST_H
spin_unlock(&xenoprof_lock);
if ( ret < 0 )
- printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
- op, current->domain->domain_id, ret);
+ gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret);
return ret;
}

View File

@@ -0,0 +1,20 @@
$NetBSD: patch-CVE-2015-8339,v 1.1 2016/01/07 17:55:55 bouyer Exp $
Patch for CVE-2015-8339 and CVE-2015-8340 aka XSA-159, based on
http://xenbits.xenproject.org/xsa/xsa159.patch
--- xen/common/memory.c.orig 2013-09-10 08:42:18.000000000 +0200
+++ xen/common/memory.c 2016-01-07 14:39:42.000000000 +0100
@@ -487,7 +487,11 @@
/* Reassign any input pages we managed to steal. */
while ( (page = page_list_remove_head(&in_chunk_list)) )
if ( assign_pages(d, page, 0, MEMF_no_refcount) )
- BUG();
+ {
+ BUG_ON(!d->is_dying);
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+ }
dying:
rcu_unlock_domain(d);
/* Free any output pages we managed to allocate. */

View File

@@ -0,0 +1,42 @@
$NetBSD: patch-XSA-166,v 1.1 2016/01/07 17:55:55 bouyer Exp $
Patch for XSA-166, based on
http://xenbits.xenproject.org/xsa/xsa166-4.3.patch
--- xen/arch/x86/hvm/hvm.c.orig
+++ xen/arch/x86/hvm/hvm.c
@@ -342,6 +342,7 @@ void hvm_migrate_pirqs(struct vcpu *v)
void hvm_do_resume(struct vcpu *v)
{
ioreq_t *p;
+ unsigned int state;
pt_restore_timer(v);
@@ -349,9 +350,10 @@ void hvm_do_resume(struct vcpu *v)
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
p = get_ioreq(v);
- while ( p->state != STATE_IOREQ_NONE )
+ while ( (state = p->state) != STATE_IOREQ_NONE )
{
- switch ( p->state )
+ rmb();
+ switch ( state )
{
case STATE_IORESP_READY: /* IORESP_READY -> NONE */
hvm_io_assist();
@@ -359,11 +361,10 @@ void hvm_do_resume(struct vcpu *v)
case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
case STATE_IOREQ_INPROCESS:
wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
- (p->state != STATE_IOREQ_READY) &&
- (p->state != STATE_IOREQ_INPROCESS));
+ p->state != state);
break;
default:
- gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+ gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
domain_crash(v->domain);
return; /* bail */
}

View File

@@ -0,0 +1,90 @@
$NetBSD: patch-XSA-182,v 1.1 2016/07/26 15:59:20 bouyer Exp $
backported from:
From 798c1498f764bfaa7b0b955bab40b01b0610d372 Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Mon, 11 Jul 2016 14:32:03 +0100
Subject: [PATCH] x86/pv: Remove unsafe bits from the mod_l?_entry() fastpath
All changes in writeability and cacheability must go through full
re-validation.
Rework the logic as a whitelist, to make it clearer to follow.
This is XSA-182
--- xen/arch/x86/mm.c.orig 2016-07-26 16:51:13.000000000 +0200
+++ xen/arch/x86/mm.c 2016-07-26 16:53:07.000000000 +0200
@@ -1792,6 +1792,14 @@
_t ## e_get_intpte(_o), _t ## e_get_intpte(_n), \
(_m), (_v), (_ad))
+/*
+ * PTE flags that a guest may change without re-validating the PTE.
+ * All other bits affect translation, caching, or Xen's safety.
+ */
+#define FASTPATH_FLAG_WHITELIST \
+ (_PAGE_NX_BIT | _PAGE_AVAIL_HIGH | _PAGE_AVAIL | _PAGE_GLOBAL | \
+ _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER)
+
/* Update the L1 entry at pl1e to new value nl1e. */
static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
unsigned long gl1mfn, int preserve_ad,
@@ -1829,8 +1837,8 @@
return 0;
}
- /* Fast path for identical mapping, r/w and presence. */
- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
+ /* Fast path for sufficiently-similar mappings.*/
+ if ( !l1e_has_changed(ol1e, nl1e, ~FASTPATH_FLAG_WHITELIST) )
{
adjust_guest_l1e(nl1e, pt_dom);
rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
@@ -1897,11 +1905,8 @@
return 0;
}
- /* Fast path for identical mapping and presence. */
- if ( !l2e_has_changed(ol2e, nl2e,
- unlikely(opt_allow_superpage)
- ? _PAGE_PSE | _PAGE_RW | _PAGE_PRESENT
- : _PAGE_PRESENT) )
+ /* Fast path for sufficiently-similar mappings. */
+ if ( !l2e_has_changed(ol2e, nl2e, ~FASTPATH_FLAG_WHITELIST) )
{
adjust_guest_l2e(nl2e, d);
rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad);
@@ -1965,8 +1970,8 @@
return -EINVAL;
}
- /* Fast path for identical mapping and presence. */
- if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
+ /* Fast path for sufficiently-similar mappings. */
+ if ( !l3e_has_changed(ol3e, nl3e, ~FASTPATH_FLAG_WHITELIST) )
{
adjust_guest_l3e(nl3e, d);
rc = UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, vcpu, preserve_ad);
@@ -2035,8 +2040,8 @@
return -EINVAL;
}
- /* Fast path for identical mapping and presence. */
- if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
+ /* Fast path for sufficiently-similar mappings. */
+ if ( !l4e_has_changed(ol4e, nl4e, ~FASTPATH_FLAG_WHITELIST) )
{
adjust_guest_l4e(nl4e, d);
rc = UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, vcpu, preserve_ad);
--- xen/include/asm-x86/page.h.orig 2014-09-02 08:22:57.000000000 +0200
+++ xen/include/asm-x86/page.h 2016-07-26 16:39:51.000000000 +0200
@@ -332,6 +332,7 @@
#define _PAGE_AVAIL2 0x800U
#define _PAGE_AVAIL 0xE00U
#define _PAGE_PSE_PAT 0x1000U
+#define _PAGE_AVAIL_HIGH (0x7ffU << 12)
#define _PAGE_PAGED 0x2000U
#define _PAGE_SHARED 0x4000U

View File

@@ -0,0 +1,37 @@
$NetBSD: patch-XSA-185,v 1.1 2016/09/08 15:41:01 bouyer Exp $
From 30aba4992b18245c436f16df7326a16c01a51570 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Mon, 8 Aug 2016 10:58:12 +0100
Subject: x86/32on64: don't allow recursive page tables from L3
L3 entries are special in PAE mode, and hence can't reasonably be used
for setting up recursive (and hence linear) page table mappings. Since
abuse is possible when the guest in fact gets run on 4-level page
tables, this needs to be excluded explicitly.
This is XSA-185.
Reported-by: Jérémie Boutoille <jboutoille@ext.quarkslab.com>
Reported-by: 栾尚聪(好风) <shangcong.lsc@alibaba-inc.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
xen/arch/x86/mm.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 109b8be..69b8b8d 100644
--- xen/arch/x86/mm.c.orig
+++ xen/arch/x86/mm.c
@@ -1122,7 +1122,9 @@ get_page_from_l3e(
rc = get_page_and_type_from_pagenr(
l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
- if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
+ if ( unlikely(rc == -EINVAL) &&
+ !is_pv_32bit_domain(d) &&
+ get_l3_linear_pagetable(l3e, pfn, d) )
rc = 0;
return rc;

View File

@@ -0,0 +1,44 @@
$NetBSD: patch-XSA-187-1,v 1.1 2016/09/08 15:41:01 bouyer Exp $
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/shadow: Avoid overflowing sh_ctxt->seg_reg[]
hvm_get_seg_reg() does not perform a range check on its input segment, calls
hvm_get_segment_register() and writes straight into sh_ctxt->seg_reg[].
x86_seg_none is outside the bounds of sh_ctxt->seg_reg[], and will hit a BUG()
in {vmx,svm}_get_segment_register().
HVM guests running with shadow paging can end up performing a virtual to
linear translation with x86_seg_none. This is used for addresses which are
already linear. However, none of this is a legitimate pagetable update, so
fail the emulation in such a case.
This is XSA-187
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
--- xen/arch/x86/mm/shadow/common.c.orig
+++ xen/arch/x86/mm/shadow/common.c
@@ -140,9 +140,18 @@ static int hvm_translate_linear_addr(
struct sh_emulate_ctxt *sh_ctxt,
unsigned long *paddr)
{
- struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
+ struct segment_register *reg;
int okay;
+ /*
+ * Can arrive here with non-user segments. However, no such cirucmstance
+ * is part of a legitimate pagetable update, so fail the emulation.
+ */
+ if ( !is_x86_user_segment(seg) )
+ return X86EMUL_UNHANDLEABLE;
+
+ reg = hvm_get_seg_reg(seg, sh_ctxt);
+
okay = hvm_virtual_to_linear_addr(
seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);

View File

@@ -0,0 +1,152 @@
$NetBSD: patch-XSA-187-2,v 1.1 2016/09/08 15:41:01 bouyer Exp $
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
HVM HAP codepaths have space for all segment registers in the seg_reg[]
cache (with x86_seg_none still risking an array overrun), while the shadow
codepaths only have space for the user segments.
Range check the input segment of *_get_seg_reg() against the size of the array
used to cache the results, to avoid overruns in the case that the callers
don't filter their input suitably.
Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
an incomplete attempt at range checking, and are now superseded. Make
hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
No functional change, but far easier to reason that no overflow is possible.
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
--- xen/include/asm-x86/hvm/emulate.h.orig 2014-09-02 08:22:57.000000000 +0200
+++ xen/include/asm-x86/hvm/emulate.h 2016-09-08 15:57:32.000000000 +0200
@@ -13,6 +13,7 @@
#define __ASM_X86_HVM_EMULATE_H__
#include <xen/config.h>
+#include <xen/err.h>
#include <asm/x86_emulate.h>
struct hvm_emulate_ctxt {
--- xen/arch/x86/hvm/emulate.c.orig 2014-09-02 08:22:57.000000000 +0200
+++ xen/arch/x86/hvm/emulate.c 2016-09-08 16:01:31.000000000 +0200
@@ -390,6 +390,8 @@
*reps = min_t(unsigned long, *reps, 4096);
reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+ if ( IS_ERR(reg) )
+ return -PTR_ERR(reg);
if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
{
@@ -777,6 +779,10 @@
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+
+ if ( IS_ERR(sreg) )
+ return -PTR_ERR(sreg);
+
memcpy(reg, sreg, sizeof(struct segment_register));
return X86EMUL_OKAY;
}
@@ -790,6 +796,9 @@
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+ if ( IS_ERR(sreg) )
+ return -PTR_ERR(sreg);
+
memcpy(sreg, reg, sizeof(struct segment_register));
__set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
@@ -1130,10 +1139,17 @@
}
}
+/*
+ * Callers which pass a known in-range x86_segment can rely on the return
+ * pointer being valid. Other callers must explicitly check for errors.
+ */
struct segment_register *hvmemul_get_seg_reg(
enum x86_segment seg,
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
+ if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
+
if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
return &hvmemul_ctxt->seg_reg[seg];
--- xen/arch/x86/mm/shadow/common.c.orig 2016-09-08 17:15:35.000000000 +0200
+++ xen/arch/x86/mm/shadow/common.c 2016-09-08 17:29:23.000000000 +0200
@@ -22,6 +22,7 @@
*/
#include <xen/config.h>
+#include <xen/err.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
@@ -116,10 +117,19 @@
/* x86 emulator support for the shadow code
*/
+/*
+ * Callers which pass a known in-range x86_segment can rely on the return
+ * pointer being valid. Other callers must explicitly check for errors.
+ */
struct segment_register *hvm_get_seg_reg(
enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
{
- struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
+ struct segment_register *seg_reg;
+
+ if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
+
+ seg_reg = &sh_ctxt->seg_reg[seg];
if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
hvm_get_segment_register(current, seg, seg_reg);
return seg_reg;
@@ -136,14 +146,9 @@
struct segment_register *reg;
int okay;
- /*
- * Can arrive here with non-user segments. However, no such cirucmstance
- * is part of a legitimate pagetable update, so fail the emulation.
- */
- if ( !is_x86_user_segment(seg) )
- return X86EMUL_UNHANDLEABLE;
-
reg = hvm_get_seg_reg(seg, sh_ctxt);
+ if ( IS_ERR(reg) )
+ return -PTR_ERR(reg);
okay = hvm_virtual_to_linear_addr(
seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
@@ -245,9 +250,6 @@
unsigned long addr;
int rc;
- if ( !is_x86_user_segment(seg) )
- return X86EMUL_UNHANDLEABLE;
-
/* How many emulations could we save if we unshadowed on stack writes? */
if ( seg == x86_seg_ss )
perfc_incr(shadow_fault_emulate_stack);
@@ -275,9 +277,6 @@
unsigned long addr, old[2], new[2];
int rc;
- if ( !is_x86_user_segment(seg) )
- return X86EMUL_UNHANDLEABLE;
-
rc = hvm_translate_linear_addr(
seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
if ( rc )

View File

@@ -1,3 +1,5 @@
$NetBSD: patch-xen_arch_x86_time.c,v 1.3 2015/12/29 04:04:32 dholland Exp $
--- xen/arch/x86/time.c.orig 2013-09-10 06:42:18.000000000 +0000
+++ xen/arch/x86/time.c 2013-09-11 14:30:13.000000000 +0000
@@ -105,7 +105,7 @@

View File

@@ -0,0 +1,13 @@
$NetBSD: patch-xen_arch_x86_x86__64_entry.S,v 1.1 2016/03/01 20:09:04 joerg Exp $
--- xen/arch/x86/x86_64/entry.S.orig 2016-02-29 20:42:10.767055585 +0000
+++ xen/arch/x86/x86_64/entry.S
@@ -51,7 +51,7 @@ restore_all_guest:
addq $8,%rsp
popq %rcx # RIP
popq %r11 # CS
- cmpw $FLAT_USER_CS32,%r11
+ cmpw $FLAT_USER_CS32,%r11w
popq %r11 # RFLAGS
popq %rsp # RSP
je 1f