
Remove support for Xen PV domU kernels. Support for HVM domU kernels
remains.  Xen is planning to phase out support for PV upstream since it
is harder to maintain and has more overhead.  Modern x86 CPUs include
virtualization extensions that support HVM guests instead of PV guests.
In addition, the PV code was i386 only and not as well maintained recently
as the HVM code.
- Remove the i386-only NATIVE option that was used to disable certain
  components for PV kernels.  These components are now standard as they
  are on amd64.
- Remove !XENHVM bits from PV drivers.
- Remove various shims required for XEN (e.g. PT_UPDATES_FLUSH, LOAD_CR3,
  etc.)
- Remove duplicate copy of <xen/features.h>.
- Remove unused, i386-only xenstored.h.

Differential Revision:	https://reviews.freebsd.org/D2362
Reviewed by:	royger
Tested by:	royger (i386/amd64 HVM domU and amd64 PVH dom0)
Relnotes:	yes
John Baldwin 2015-04-30 15:48:48 +00:00
parent 902945c770
commit ed95805e90
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=282274
62 changed files with 90 additions and 10628 deletions

View File

@ -35,14 +35,6 @@
.Nm xen
.Nd Xen Hypervisor Guest (DomU) Support
.Sh SYNOPSIS
To compile para-virtualized (PV) Xen guest support into an i386 kernel, place
the following lines in your kernel configuration file:
.Bd -ragged -offset indent
.Cd "options PAE"
.Cd "options XEN"
.Cd "nooptions NATIVE"
.Ed
.Pp
To compile hardware-assisted virtualization (HVM) Xen guest support with
para-virtualized drivers into an amd64 or i386 kernel,
place the following lines in your kernel configuration file:
@ -69,34 +61,14 @@ and hence able to optimize certain behaviors to improve performance or
semantics.
.Pp
.Fx
supports a fully para-virtualized (PV) kernel on the i386 architecture using
.Cd "options XEN"
and
.Cd "nooptions NATIVE" ;
currently, this requires use of a PAE kernel, enabled via
.Cd "options PAE" .
.Pp
.Fx
supports hardware-assisted virtualization (HVM) on both the i386 and amd64
kernels; however, PV device drivers with an HVM kernel are only supported on
the amd64 architecture, and require
.Cd "options XENHVM"
and
.Cd "device xenpci" .
supports hardware-assisted virtualization (HVM) on both i386 and amd64
kernels.
.Pp
Para-virtualized device drivers are required in order to support certain
functionality, such as processing management requests, returning idle
physical memory pages to the hypervisor, etc.
.Ss Xen DomU device drivers
Xen para-virtualized drivers are automatically added to the kernel if a PV
kernel is compiled using
.Cd "options XEN" ;
for HVM environments,
.Cd "options XENHVM"
and
.Cd "device xenpci"
are required.
The follow drivers are supported:
These para-virtualized drivers are supported:
.Bl -hang -offset indent -width blkfront
.It Nm balloon
Allow physical memory pages to be returned to the hypervisor as a result of
@ -148,8 +120,6 @@ It is recommended that adaptive locking be disabled when using Xen:
.Cd "options NO_ADAPTIVE_RWLOCKS"
.Cd "options NO_ADAPTIVE_SX"
.Ed
.Sh SEE ALSO
.Xr pae 4
.Sh HISTORY
Support for
.Nm
@ -173,9 +143,6 @@ This manual page was written by
.Fx
is only able to run as a Xen guest (DomU) and not as a Xen host (Dom0).
.Pp
A fully para-virtualized (PV) kernel is only supported on i386, and not
amd64.
.Pp
As of this release, Xen PV DomU support is not heavily tested; instability
has been reported during VM migration of PV kernels.
.Pp
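For context on how these configuration knobs take effect: config(8) turns
each options line into a preprocessor define (XENHVM lands in opt_global.h,
as the options hunk later in this diff shows), and kernel sources simply
test the macro. A minimal sketch with a hypothetical driver function, not
taken from the tree:

#include <sys/param.h>
#include <sys/errno.h>
/*
 * opt_global.h, which defines XENHVM when "options XENHVM" is configured,
 * is supplied to every kernel object by the build itself, so no explicit
 * include is needed for it.
 */

static int
mydrv_attach(void)	/* illustrative name only */
{
#ifdef XENHVM
	return (0);	/* Xen guest support compiled in; attach */
#else
	return (ENXIO);	/* kernel built without Xen guest support */
#endif
}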

View File

@ -29,12 +29,7 @@
#ifndef _XEN_XENFUNC_H_
#define _XEN_XENFUNC_H_
#ifdef XENHVM
#include <machine/xen/xenvar.h>
#else
#include <machine/xen/xenpmap.h>
#include <machine/segments.h>
#endif
#define BKPT __asm__("int3");
#define XPQ_CALL_DEPTH 5
@ -64,10 +59,6 @@ void _xen_machphys_update(vm_paddr_t, vm_paddr_t, char *file, int line);
#define xen_machphys_update(a, b) _xen_machphys_update((a), (b), NULL, 0)
#endif
#ifndef XENHVM
void xen_update_descriptor(union descriptor *, union descriptor *);
#endif
extern struct mtx balloon_lock;
#if 0
#define balloon_lock(__flags) mtx_lock_irqsave(&balloon_lock, __flags)

View File

@ -1,227 +0,0 @@
/*
*
* Copyright (c) 2004 Christian Limpach.
* Copyright (c) 2004,2005 Kip Macy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Christian Limpach.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_
#include <machine/xen/features.h>
void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int);
void xen_pt_switch(vm_paddr_t);
void xen_set_ldt(vm_paddr_t, unsigned long);
void xen_pgdpt_pin(vm_paddr_t);
void xen_pgd_pin(vm_paddr_t);
void xen_pgd_unpin(vm_paddr_t);
void xen_pt_pin(vm_paddr_t);
void xen_pt_unpin(vm_paddr_t);
void xen_flush_queue(void);
void xen_check_queue(void);
#if 0
void pmap_ref(pt_entry_t *pte, vm_paddr_t ma);
#endif
#ifdef INVARIANTS
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__)
#else
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0)
#endif
#ifdef PMAP_DEBUG
#define PMAP_REF pmap_ref
#define PMAP_DEC_REF_PAGE pmap_dec_ref_page
#define PMAP_MARK_PRIV pmap_mark_privileged
#define PMAP_MARK_UNPRIV pmap_mark_unprivileged
#else
#define PMAP_MARK_PRIV(a)
#define PMAP_MARK_UNPRIV(a)
#define PMAP_REF(a, b)
#define PMAP_DEC_REF_PAGE(a)
#endif
#define ALWAYS_SYNC 0
#ifdef PT_DEBUG
#define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__)
#else
#define PT_LOG()
#endif
#define INVALID_P2M_ENTRY (~0UL)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
#define SH_PD_SET_VA 1
#define SH_PD_SET_VA_MA 2
#define SH_PD_SET_VA_CLEAR 3
struct pmap;
void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type);
#ifdef notyet
static vm_paddr_t
vptetomachpte(vm_paddr_t *pte)
{
vm_offset_t offset, ppte;
vm_paddr_t pgoffset, retval, *pdir_shadow_ptr;
int pgindex;
ppte = (vm_offset_t)pte;
pgoffset = (ppte & PAGE_MASK);
offset = ppte - (vm_offset_t)PTmap;
pgindex = ppte >> PDRSHIFT;
pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow);
retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset;
return (retval);
}
#endif
#define PT_GET(_ptp) \
(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))
#ifdef WRITABLE_PAGETABLES
#define PT_SET_VA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
PT_LOG(); \
*(_ptp) = xpmap_ptom((_npte)); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
PT_LOG(); \
*(_ptp) = (_npte); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
PT_LOG(); \
*(_ptp) = 0; \
} while (/*CONSTCOND*/0)
#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \
PMAP_REF((_ptp), (_npte)); \
pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#else /* !WRITABLE_PAGETABLES */
#define PT_SET_VA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
xen_queue_pt_update(vtomach(_ptp), \
xpmap_ptom(_npte)); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
xen_queue_pt_update(vtomach(_ptp), _npte); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
xen_queue_pt_update(vtomach(_ptp), 0); \
if (sync || ALWAYS_SYNC) \
xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#endif
#define PT_SET_MA(_va, _ma) \
do { \
PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),\
(_ma), \
UVMF_INVLPG| UVMF_ALL) < 0); \
} while (/*CONSTCOND*/0)
#define PT_UPDATES_FLUSH() do { \
xen_flush_queue(); \
} while (/*CONSTCOND*/0)
static __inline vm_paddr_t
xpmap_mtop(vm_paddr_t mpa)
{
vm_paddr_t tmp = (mpa & PG_FRAME);
return machtophys(tmp) | (mpa & ~PG_FRAME);
}
static __inline vm_paddr_t
xpmap_ptom(vm_paddr_t ppa)
{
vm_paddr_t tmp = (ppa & PG_FRAME);
return phystomach(tmp) | (ppa & ~PG_FRAME);
}
static __inline void
set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
#ifdef notyet
PANIC_IF(max_mapnr && pfn >= max_mapnr);
#endif
if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef notyet
PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY));
#endif
return;
}
xen_phys_machine[pfn] = mfn;
}
#endif /* _XEN_XENPMAP_H_ */
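This removed header existed because a PV kernel juggles two frame
namespaces: page-table entries hold machine frame numbers (MFNs) while the
rest of the kernel reasons in pseudo-physical frames (PFNs), so
xpmap_mtop()/xpmap_ptom() translate only the frame bits of an entry and
preserve the low flag bits. A self-contained toy sketch of that split (the
table and names are illustrative, not the kernel's):

#include <stdint.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PG_FRAME	(~(uint64_t)0xfff)	/* frame bits of an entry */

static uint64_t toy_p2m[] = { 7, 42, 3 };	/* pfn -> mfn, made up */

/* pseudo-physical to machine: translate the frame, keep the flag bits */
static uint64_t
toy_ptom(uint64_t pa)
{
	uint64_t mfn = toy_p2m[pa >> TOY_PAGE_SHIFT];

	return ((mfn << TOY_PAGE_SHIFT) | (pa & ~TOY_PG_FRAME));
}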

View File

@ -48,68 +48,7 @@ if (xendebug_flags & argflags) XENPRINTF("(file=%s, line=%d) " _f "\n", __FILE__
#define TRACE_DEBUG(argflags, _f, _a...)
#endif
#ifdef XENHVM
static inline vm_paddr_t
phystomach(vm_paddr_t pa)
{
return (pa);
}
static inline vm_paddr_t
machtophys(vm_paddr_t ma)
{
return (ma);
}
#define vtomach(va) pmap_kextract((vm_offset_t) (va))
#define PFNTOMFN(pa) (pa)
#define MFNTOPFN(ma) (ma)
#define set_phys_to_machine(pfn, mfn) ((void)0)
#define phys_to_machine_mapping_valid(pfn) (TRUE)
#define PT_UPDATES_FLUSH() ((void)0)
#else
extern xen_pfn_t *xen_phys_machine;
extern xen_pfn_t *xen_machine_phys;
/* Xen starts physical pages after the 4MB ISA hole -
* FreeBSD doesn't
*/
#undef ADD_ISA_HOLE /* XXX */
#ifdef ADD_ISA_HOLE
#define ISA_INDEX_OFFSET 1024
#define ISA_PDR_OFFSET 1
#else
#define ISA_INDEX_OFFSET 0
#define ISA_PDR_OFFSET 0
#endif
#define PFNTOMFN(i) (xen_phys_machine[(i)])
#define MFNTOPFN(i) ((vm_paddr_t)xen_machine_phys[(i)])
#define VTOP(x) ((((uintptr_t)(x))) - KERNBASE)
#define PTOV(x) (((uintptr_t)(x)) + KERNBASE)
#define VTOPFN(x) (VTOP(x) >> PAGE_SHIFT)
#define PFNTOV(x) PTOV((vm_paddr_t)(x) << PAGE_SHIFT)
#define VTOMFN(va) (vtomach(va) >> PAGE_SHIFT)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define phystomach(pa) (((vm_paddr_t)(PFNTOMFN((pa) >> PAGE_SHIFT))) << PAGE_SHIFT)
#define machtophys(ma) (((vm_paddr_t)(MFNTOPFN((ma) >> PAGE_SHIFT))) << PAGE_SHIFT)
#endif
void xpq_init(void);

View File

@ -2676,24 +2676,24 @@ wpi.fw optional wpifw \
clean "wpi.fw"
dev/xe/if_xe.c optional xe
dev/xe/if_xe_pccard.c optional xe pccard
dev/xen/balloon/balloon.c optional xen | xenhvm
dev/xen/blkfront/blkfront.c optional xen | xenhvm
dev/xen/blkback/blkback.c optional xen | xenhvm
dev/xen/console/console.c optional xen | xenhvm
dev/xen/console/xencons_ring.c optional xen | xenhvm
dev/xen/control/control.c optional xen | xenhvm
dev/xen/grant_table/grant_table.c optional xen | xenhvm
dev/xen/netback/netback.c optional xen | xenhvm
dev/xen/netfront/netfront.c optional xen | xenhvm
dev/xen/balloon/balloon.c optional xenhvm
dev/xen/blkfront/blkfront.c optional xenhvm
dev/xen/blkback/blkback.c optional xenhvm
dev/xen/console/console.c optional xenhvm
dev/xen/console/xencons_ring.c optional xenhvm
dev/xen/control/control.c optional xenhvm
dev/xen/grant_table/grant_table.c optional xenhvm
dev/xen/netback/netback.c optional xenhvm
dev/xen/netfront/netfront.c optional xenhvm
dev/xen/xenpci/xenpci.c optional xenpci
dev/xen/timer/timer.c optional xen | xenhvm
dev/xen/pvcpu/pvcpu.c optional xen | xenhvm
dev/xen/xenstore/xenstore.c optional xen | xenhvm
dev/xen/xenstore/xenstore_dev.c optional xen | xenhvm
dev/xen/xenstore/xenstored_dev.c optional xen | xenhvm
dev/xen/evtchn/evtchn_dev.c optional xen | xenhvm
dev/xen/privcmd/privcmd.c optional xen | xenhvm
dev/xen/debug/debug.c optional xen | xenhvm
dev/xen/timer/timer.c optional xenhvm
dev/xen/pvcpu/pvcpu.c optional xenhvm
dev/xen/xenstore/xenstore.c optional xenhvm
dev/xen/xenstore/xenstore_dev.c optional xenhvm
dev/xen/xenstore/xenstored_dev.c optional xenhvm
dev/xen/evtchn/evtchn_dev.c optional xenhvm
dev/xen/privcmd/privcmd.c optional xenhvm
dev/xen/debug/debug.c optional xenhvm
dev/xl/if_xl.c optional xl pci
dev/xl/xlphy.c optional xl pci
fs/autofs/autofs.c optional autofs
@ -4043,13 +4043,13 @@ vm/vm_reserv.c standard
vm/vm_unix.c standard
vm/vm_zeroidle.c standard
vm/vnode_pager.c standard
xen/features.c optional xen | xenhvm
xen/xenbus/xenbus_if.m optional xen | xenhvm
xen/xenbus/xenbus.c optional xen | xenhvm
xen/xenbus/xenbusb_if.m optional xen | xenhvm
xen/xenbus/xenbusb.c optional xen | xenhvm
xen/xenbus/xenbusb_front.c optional xen | xenhvm
xen/xenbus/xenbusb_back.c optional xen | xenhvm
xen/features.c optional xenhvm
xen/xenbus/xenbus_if.m optional xenhvm
xen/xenbus/xenbus.c optional xenhvm
xen/xenbus/xenbusb_if.m optional xenhvm
xen/xenbus/xenbusb.c optional xenhvm
xen/xenbus/xenbusb_front.c optional xenhvm
xen/xenbus/xenbusb_back.c optional xenhvm
xdr/xdr.c optional krpc | nfslockd | nfscl | nfsd
xdr/xdr_array.c optional krpc | nfslockd | nfscl | nfsd
xdr/xdr_mbuf.c optional krpc | nfslockd | nfscl | nfsd

View File

@ -577,7 +577,7 @@ x86/x86/pvclock.c standard
x86/x86/tsc.c standard
x86/x86/delay.c standard
x86/xen/hvm.c optional xenhvm
x86/xen/xen_intr.c optional xen | xenhvm
x86/xen/xen_intr.c optional xenhvm
x86/xen/pv.c optional xenhvm
x86/xen/pvcpu_enum.c optional xenhvm
x86/xen/xen_apic.c optional xenhvm

View File

@ -428,16 +428,15 @@ i386/bios/smapi_bios.S optional smapi
i386/i386/atomic.c standard \
compile-with "${CC} -c ${CFLAGS} ${DEFINED_PROF:S/^$/-fomit-frame-pointer/} ${.IMPSRC}"
i386/i386/autoconf.c standard
i386/i386/bios.c optional native
i386/i386/bioscall.s optional native
i386/i386/bios.c standard
i386/i386/bioscall.s standard
i386/i386/bpf_jit_machdep.c optional bpf_jitter
i386/i386/db_disasm.c optional ddb
i386/i386/db_interface.c optional ddb
i386/i386/db_trace.c optional ddb
i386/i386/elan-mmcr.c optional cpu_elan | cpu_soekris
i386/i386/elf_machdep.c standard
i386/i386/exception.s optional native
i386/xen/exception.s optional xen
i386/i386/exception.s standard
i386/i386/gdb_machdep.c optional gdb
i386/i386/geode.c optional cpu_geode
i386/i386/i686_mem.c optional mem
@ -445,22 +444,17 @@ i386/i386/in_cksum.c optional inet | inet6
i386/i386/initcpu.c standard
i386/i386/io.c optional io
i386/i386/k6_mem.c optional mem
i386/i386/locore.s optional native no-obj
i386/xen/locore.s optional xen no-obj
i386/i386/locore.s standard no-obj
i386/i386/longrun.c optional cpu_enable_longrun
i386/i386/machdep.c standard
i386/xen/xen_machdep.c optional xen
i386/i386/mem.c optional mem
i386/i386/minidump_machdep.c standard
i386/i386/mp_clock.c optional smp
i386/i386/mp_machdep.c optional native smp
i386/xen/mp_machdep.c optional xen smp
i386/i386/mp_machdep.c optional smp
i386/i386/mp_watchdog.c optional mp_watchdog smp
i386/i386/mpboot.s optional smp native
i386/xen/mptable.c optional apic xen
i386/i386/mpboot.s optional smp
i386/i386/perfmon.c optional perfmon
i386/i386/pmap.c optional native
i386/xen/pmap.c optional xen
i386/i386/pmap.c standard
i386/i386/ptrace_machdep.c standard
i386/i386/stack_machdep.c optional ddb | stack
i386/i386/support.s standard
@ -489,7 +483,6 @@ i386/ibcs2/ibcs2_util.c optional ibcs2
i386/ibcs2/ibcs2_xenix.c optional ibcs2
i386/ibcs2/ibcs2_xenix_sysent.c optional ibcs2
i386/ibcs2/imgact_coff.c optional ibcs2
i386/xen/clock.c optional xen
i386/isa/elink.c optional ep | ie
i386/isa/npx.c optional npx
i386/isa/pmtimer.c optional pmtimer
@ -566,9 +559,9 @@ x86/iommu/intel_qi.c optional acpi acpi_dmar pci
x86/iommu/intel_quirks.c optional acpi acpi_dmar pci
x86/iommu/intel_utils.c optional acpi acpi_dmar pci
x86/isa/atpic.c optional atpic
x86/isa/atrtc.c optional native
x86/isa/clock.c optional native
x86/isa/elcr.c optional atpic | apic native
x86/isa/atrtc.c standard
x86/isa/clock.c standard
x86/isa/elcr.c optional atpic | apic
x86/isa/isa.c optional isa
x86/isa/isa_dma.c optional isa
x86/isa/nmi.c standard
@ -583,20 +576,20 @@ x86/x86/fdt_machdep.c optional fdt
x86/x86/identcpu.c standard
x86/x86/intr_machdep.c standard
x86/x86/io_apic.c optional apic
x86/x86/legacy.c optional native
x86/x86/legacy.c standard
x86/x86/local_apic.c optional apic
x86/x86/mca.c standard
x86/x86/mptable.c optional apic native
x86/x86/mptable_pci.c optional apic native pci
x86/x86/mp_x86.c optional native smp
x86/x86/mptable.c optional apic
x86/x86/mptable_pci.c optional apic pci
x86/x86/mp_x86.c optional smp
x86/x86/msi.c optional apic pci
x86/x86/nexus.c standard
x86/x86/tsc.c standard
x86/x86/pvclock.c standard
x86/x86/delay.c standard
x86/xen/hvm.c optional xenhvm
x86/xen/xen_intr.c optional xen | xenhvm
x86/xen/xen_intr.c optional xenhvm
x86/xen/xen_apic.c optional xenhvm
x86/xen/xenpv.c optional xen | xenhvm
x86/xen/xen_nexus.c optional xen | xenhvm
x86/xen/xen_msi.c optional xen | xenhvm
x86/xen/xenpv.c optional xenhvm
x86/xen/xen_nexus.c optional xenhvm
x86/xen/xen_msi.c optional xenhvm

View File

@ -121,8 +121,6 @@ NPX_DEBUG opt_npx.h
# BPF just-in-time compiler
BPF_JITTER opt_bpf.h
NATIVE opt_global.h
XEN opt_global.h
XENHVM opt_global.h
HYPERV opt_global.h

View File

@ -118,11 +118,6 @@ current_target(void)
static unsigned long
minimum_target(void)
{
#ifdef XENHVM
#define max_pfn realmem
#else
#define max_pfn HYPERVISOR_shared_info->arch.max_pfn
#endif
unsigned long min_pages, curr_pages = current_target();
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
@ -139,16 +134,15 @@ minimum_target(void)
* 32768 1320
* 131072 4392
*/
if (max_pfn < MB2PAGES(128))
min_pages = MB2PAGES(8) + (max_pfn >> 1);
else if (max_pfn < MB2PAGES(512))
min_pages = MB2PAGES(40) + (max_pfn >> 2);
else if (max_pfn < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (max_pfn >> 3);
if (realmem < MB2PAGES(128))
min_pages = MB2PAGES(8) + (realmem >> 1);
else if (realmem < MB2PAGES(512))
min_pages = MB2PAGES(40) + (realmem >> 2);
else if (realmem < MB2PAGES(2048))
min_pages = MB2PAGES(104) + (realmem >> 3);
else
min_pages = MB2PAGES(296) + (max_pfn >> 5);
min_pages = MB2PAGES(296) + (realmem >> 5);
#undef MB2PAGES
#undef max_pfn
/* Don't enforce growth */
return (min(min_pages, curr_pages));
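/*
 * A standalone restatement of the tiering above, useful for checking the
 * table in the comment: with PAGE_SHIFT 12, MB2PAGES(mb) is mb * 256
 * pages, so a 32768 MB guest gets 296 MB + 32768/32 MB = 1320 MB, as the
 * table says. Illustrative only, not kernel code.
 */
#include <stdint.h>

#define SK_PAGE_SHIFT	12
#define SK_MB2PAGES(mb)	((uint64_t)(mb) << (20 - SK_PAGE_SHIFT))

static uint64_t
balloon_floor_pages(uint64_t realmem)	/* realmem is in pages */
{
	uint64_t min_pages;

	if (realmem < SK_MB2PAGES(128))
		min_pages = SK_MB2PAGES(8) + (realmem >> 1);
	else if (realmem < SK_MB2PAGES(512))
		min_pages = SK_MB2PAGES(40) + (realmem >> 2);
	else if (realmem < SK_MB2PAGES(2048))
		min_pages = SK_MB2PAGES(104) + (realmem >> 3);
	else
		min_pages = SK_MB2PAGES(296) + (realmem >> 5);
	return (min_pages);	/* caller still clamps to current_target() */
}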
@ -204,12 +198,9 @@ increase_reservation(unsigned long nr_pages)
bs.balloon_low--;
pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
KASSERT((xen_feature(XENFEAT_auto_translated_physmap) ||
!phys_to_machine_mapping_valid(pfn)),
KASSERT(xen_feature(XENFEAT_auto_translated_physmap),
("auto translated physmap but mapping is valid"));
set_phys_to_machine(pfn, frame_list[i]);
vm_page_free(page);
}
@ -258,9 +249,8 @@ decrease_reservation(unsigned long nr_pages)
}
pfn = (VM_PAGE_TO_PHYS(page) >> PAGE_SHIFT);
frame_list[i] = PFNTOMFN(pfn);
frame_list[i] = pfn;
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
bs.balloon_low++;
}
@ -393,21 +383,11 @@ static int
xenballoon_attach(device_t dev)
{
int err;
#ifndef XENHVM
vm_page_t page;
unsigned long pfn;
#define max_pfn HYPERVISOR_shared_info->arch.max_pfn
#endif
mtx_init(&balloon_mutex, "balloon_mutex", NULL, MTX_DEF);
#ifndef XENHVM
bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
#else
bs.current_pages = xen_pv_domain() ?
HYPERVISOR_start_info->nr_pages : realmem;
#endif
bs.target_pages = bs.current_pages;
bs.balloon_low = 0;
bs.balloon_high = 0;
@ -416,16 +396,6 @@ xenballoon_attach(device_t dev)
kproc_create(balloon_process, NULL, NULL, 0, 0, "balloon");
#ifndef XENHVM
/* Initialise the balloon with excess memory space. */
for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
page = PHYS_TO_VM_PAGE(pfn << PAGE_SHIFT);
TAILQ_INSERT_HEAD(&ballooned_pages, page, plinks.q);
bs.balloon_low++;
}
#undef max_pfn
#endif
target_watch.callback = watch_target;
err = xs_register_watch(&target_watch);

View File

@ -742,7 +742,6 @@ struct xbb_softc {
/** Mutex protecting per-instance data. */
struct mtx lock;
#ifdef XENHVM
/**
* Resource representing allocated physical address space
* associated with our per-instance kva region.
@ -751,7 +750,6 @@ struct xbb_softc {
/** Resource id for allocated physical address space. */
int pseudo_phys_res_id;
#endif
/**
* I/O statistics from BlockBack dispatch down. These are
@ -2818,16 +2816,12 @@ static void
xbb_free_communication_mem(struct xbb_softc *xbb)
{
if (xbb->kva != 0) {
#ifndef XENHVM
kva_free(xbb->kva, xbb->kva_size);
#else
if (xbb->pseudo_phys_res != NULL) {
bus_release_resource(xbb->dev, SYS_RES_MEMORY,
xbb->pseudo_phys_res_id,
xbb->pseudo_phys_res);
xbb->pseudo_phys_res = NULL;
}
#endif
}
xbb->kva = 0;
xbb->gnt_base_addr = 0;
@ -3055,12 +3049,6 @@ xbb_alloc_communication_mem(struct xbb_softc *xbb)
DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
device_get_nameunit(xbb->dev), xbb->kva_size,
xbb->reqlist_kva_size);
#ifndef XENHVM
xbb->kva = kva_alloc(xbb->kva_size);
if (xbb->kva == 0)
return (ENOMEM);
xbb->gnt_base_addr = xbb->kva;
#else /* XENHVM */
/*
* Reserve a range of pseudo physical memory that we can map
* into kva. These pages will only be backed by machine
@ -3078,7 +3066,6 @@ xbb_alloc_communication_mem(struct xbb_softc *xbb)
}
xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
#endif /* XENHVM */
DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,

View File

@ -138,9 +138,7 @@ __FBSDID("$FreeBSD$");
#include <xen/gnttab.h>
#include <xen/xen_intr.h>
#ifdef XENHVM
#include <xen/hvm.h>
#endif
#include <xen/interface/event_channel.h>
#include <xen/interface/grant_table.h>
@ -192,133 +190,6 @@ xctrl_reboot()
shutdown_nice(0);
}
#ifndef XENHVM
extern void xencons_suspend(void);
extern void xencons_resume(void);
/* Full PV mode suspension. */
static void
xctrl_suspend()
{
int i, j, k, fpp, suspend_cancelled;
unsigned long max_pfn, start_info_mfn;
EVENTHANDLER_INVOKE(power_suspend);
#ifdef SMP
struct thread *td;
cpuset_t map;
u_int cpuid;
/*
* Bind us to CPU 0 and stop any other VCPUs.
*/
td = curthread;
thread_lock(td);
sched_bind(td, 0);
thread_unlock(td);
cpuid = PCPU_GET(cpuid);
KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));
map = all_cpus;
CPU_CLR(cpuid, &map);
CPU_NAND(&map, &stopped_cpus);
if (!CPU_EMPTY(&map))
stop_cpus(map);
#endif
/*
* Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
* drivers need this.
*/
mtx_lock(&Giant);
if (DEVICE_SUSPEND(root_bus) != 0) {
mtx_unlock(&Giant);
printf("%s: device_suspend failed\n", __func__);
#ifdef SMP
if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
return;
}
mtx_unlock(&Giant);
local_irq_disable();
xencons_suspend();
gnttab_suspend();
intr_suspend();
max_pfn = HYPERVISOR_shared_info->arch.max_pfn;
void *shared_info = HYPERVISOR_shared_info;
HYPERVISOR_shared_info = NULL;
pmap_kremove((vm_offset_t) shared_info);
PT_UPDATES_FLUSH();
xen_start_info->store_mfn = MFNTOPFN(xen_start_info->store_mfn);
xen_start_info->console.domU.mfn = MFNTOPFN(xen_start_info->console.domU.mfn);
/*
* We'll stop somewhere inside this hypercall. When it returns,
* we'll start resuming after the restore.
*/
start_info_mfn = VTOMFN(xen_start_info);
pmap_suspend();
suspend_cancelled = HYPERVISOR_suspend(start_info_mfn);
pmap_resume();
pmap_kenter_ma((vm_offset_t) shared_info, xen_start_info->shared_info);
HYPERVISOR_shared_info = shared_info;
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
VTOMFN(xen_pfn_to_mfn_frame_list_list);
fpp = PAGE_SIZE/sizeof(unsigned long);
for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
if ((j % fpp) == 0) {
k++;
xen_pfn_to_mfn_frame_list_list[k] =
VTOMFN(xen_pfn_to_mfn_frame_list[k]);
j = 0;
}
xen_pfn_to_mfn_frame_list[k][j] =
VTOMFN(&xen_phys_machine[i]);
}
HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
gnttab_resume(NULL);
intr_resume(suspend_cancelled != 0);
local_irq_enable();
xencons_resume();
#ifdef CONFIG_SMP
for_each_cpu(i)
vcpu_prepare(i);
#endif
/*
* Only resume xenbus /after/ we've prepared our VCPUs; otherwise
* the VCPU hotplug callback can race with our vcpu_prepare
*/
mtx_lock(&Giant);
DEVICE_RESUME(root_bus);
mtx_unlock(&Giant);
#ifdef SMP
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
EVENTHANDLER_INVOKE(power_resume);
}
#else
/* HVM mode suspension. */
static void
xctrl_suspend()
{
@ -417,7 +288,6 @@ xctrl_suspend()
printf("System resumed after suspension\n");
}
#endif
static void
xctrl_crash()

View File

@ -53,7 +53,6 @@ static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static struct mtx gnttab_list_lock;
#ifdef XENHVM
/*
* Resource representing allocated physical address space
* for the grant table metainfo
@ -62,7 +61,6 @@ static struct resource *gnttab_pseudo_phys_res;
/* Resource id for allocated physical address space. */
static int gnttab_pseudo_phys_res_id;
#endif
static grant_entry_t *shared;
@ -510,72 +508,6 @@ unmap_pte_fn(pte_t *pte, struct page *pmd_page,
}
#endif
#ifndef XENHVM
static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
u_long *frames;
unsigned int nr_gframes = end_idx + 1;
int i, rc;
frames = malloc(nr_gframes * sizeof(unsigned long), M_DEVBUF, M_NOWAIT);
if (!frames)
return (ENOMEM);
setup.dom = DOMID_SELF;
setup.nr_frames = nr_gframes;
set_xen_guest_handle(setup.frame_list, frames);
rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
if (rc == -ENOSYS) {
free(frames, M_DEVBUF);
return (ENOSYS);
}
KASSERT(!(rc || setup.status),
("unexpected result from grant_table_op"));
if (shared == NULL) {
vm_offset_t area;
area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
KASSERT(area, ("can't allocate VM space for grant table"));
shared = (grant_entry_t *)area;
}
for (i = 0; i < nr_gframes; i++)
PT_SET_MA(((caddr_t)shared) + i*PAGE_SIZE,
((vm_paddr_t)frames[i]) << PAGE_SHIFT | PG_RW | PG_V);
free(frames, M_DEVBUF);
return (0);
}
int
gnttab_resume(device_t dev)
{
if (max_nr_grant_frames() < nr_grant_frames)
return (ENOSYS);
return (gnttab_map(0, nr_grant_frames - 1));
}
int
gnttab_suspend(void)
{
int i;
for (i = 0; i < nr_grant_frames; i++)
pmap_kremove((vm_offset_t) shared + i * PAGE_SIZE);
return (0);
}
#else /* XENHVM */
static vm_paddr_t resume_frames;
static int
@ -638,8 +570,6 @@ gnttab_resume(device_t dev)
return (gnttab_map(0, nr_gframes - 1));
}
#endif
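/*
 * For contrast with the PV path deleted above: the surviving HVM code maps
 * grant-table frames by asking the hypervisor to place them at a
 * guest-physical address (resume_frames) rather than rewriting PTEs with
 * PT_SET_MA. A rough sketch against the public Xen interface, not the
 * exact code in this file:
 */
static void
map_gnttab_frames(vm_paddr_t resume_frames, unsigned int nframes)
{
	struct xen_add_to_physmap xatp;
	unsigned int i;

	for (i = 0; i < nframes; i++) {
		xatp.domid = DOMID_SELF;
		xatp.idx = i;			/* grant-table frame index */
		xatp.space = XENMAPSPACE_grant_table;
		xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
			panic("grant table: add_to_physmap failed");
	}
}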
static int
gnttab_expand(unsigned int req_entries)
{

View File

@ -473,7 +473,6 @@ struct xnb_softc {
*/
gnttab_copy_table tx_gnttab;
#ifdef XENHVM
/**
* Resource representing allocated physical address space
* associated with our per-instance kva region.
@ -482,7 +481,6 @@ struct xnb_softc {
/** Resource id for allocated physical address space. */
int pseudo_phys_res_id;
#endif
/** Ring mapping and interrupt configuration data. */
struct xnb_ring_config ring_configs[XNB_NUM_RING_TYPES];
@ -626,16 +624,12 @@ static void
xnb_free_communication_mem(struct xnb_softc *xnb)
{
if (xnb->kva != 0) {
#ifndef XENHVM
kva_free(xnb->kva, xnb->kva_size);
#else
if (xnb->pseudo_phys_res != NULL) {
bus_release_resource(xnb->dev, SYS_RES_MEMORY,
xnb->pseudo_phys_res_id,
xnb->pseudo_phys_res);
xnb->pseudo_phys_res = NULL;
}
#endif /* XENHVM */
}
xnb->kva = 0;
xnb->gnt_base_addr = 0;
@ -816,12 +810,7 @@ xnb_alloc_communication_mem(struct xnb_softc *xnb)
for (i=0; i < XNB_NUM_RING_TYPES; i++) {
xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
}
#ifndef XENHVM
xnb->kva = kva_alloc(xnb->kva_size);
if (xnb->kva == 0)
return (ENOMEM);
xnb->gnt_base_addr = xnb->kva;
#else /* defined XENHVM */
/*
* Reserve a range of pseudo physical memory that we can map
* into kva. These pages will only be backed by machine
@ -840,7 +829,6 @@ xnb_alloc_communication_mem(struct xnb_softc *xnb)
}
xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
#endif /* !defined XENHVM */
return (0);
}
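With the PV-only kva_alloc() branch gone, netback (like blkback above)
always reserves pseudo-physical address space as a SYS_RES_MEMORY resource
and pulls both the KVA and the grant base address out of it. A condensed
sketch of the shared pattern; the xenmem_alloc() helper name is an
assumption here, so check the tree for the exact allocator:

static int
alloc_comm_mem(device_t dev, int *res_id, size_t size,
    struct resource **res, vm_offset_t *kva, vm_paddr_t *gnt_base)
{
	/* reserve guest-physical space backed only by grant mappings */
	*res = xenmem_alloc(dev, res_id, size);
	if (*res == NULL)
		return (ENOMEM);
	*kva = (vm_offset_t)rman_get_virtual(*res);	/* mapped KVA */
	*gnt_base = rman_get_start(*res);	/* guest-physical base */
	return (0);
}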

View File

@ -879,12 +879,11 @@ network_alloc_rx_buffers(struct netfront_info *sc)
if (sc->copying_receiver == 0) {
gnttab_grant_foreign_transfer_ref(ref,
otherend_id, pfn);
sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn);
sc->rx_pfn_array[nr_flips] = pfn;
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Remove this page before passing
* back to Xen.
*/
set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
MULTI_update_va_mapping(&sc->rx_mcl[i],
vaddr, 0, 0);
}
@ -892,7 +891,7 @@ network_alloc_rx_buffers(struct netfront_info *sc)
} else {
gnttab_grant_foreign_access_ref(ref,
otherend_id,
PFNTOMFN(pfn), 0);
pfn, 0);
}
req->id = id;
req->gref = ref;
@ -907,7 +906,6 @@ network_alloc_rx_buffers(struct netfront_info *sc)
* We may have allocated buffers which have entries outstanding
* in the page update queue -- make sure we flush those first!
*/
PT_UPDATES_FLUSH();
if (nr_flips != 0) {
#ifdef notyet
/* Tell the ballon driver what is going on. */
@ -1361,8 +1359,6 @@ xennet_get_responses(struct netfront_info *np,
mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) |
MMU_MACHPHYS_UPDATE;
mmu->val = pfn;
set_phys_to_machine(pfn, mfn);
}
pages_flipped++;
} else {
@ -1927,7 +1923,7 @@ network_connect(struct netfront_info *np)
} else {
gnttab_grant_foreign_access_ref(ref,
xenbus_get_otherend_id(np->xbdev),
PFNTOMFN(pfn), 0);
pfn, 0);
}
req->gref = ref;
req->id = requeue_idx;

View File

@ -26,7 +26,6 @@ options GEOM_PART_EBR_COMPAT
options GEOM_PART_MBR
# enable support for native hardware
options NATIVE
device atpic
options NEW_PCIB

View File

@ -1,96 +0,0 @@
#
# XEN -- Kernel configuration for i386 XEN DomU
#
# $FreeBSD$
cpu I686_CPU
ident XEN
makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
# The following drivers don't build with PAE or XEN enabled.
makeoptions WITHOUT_MODULES="ctl dpt drm drm2 hptmv ida"
# The following drivers don't work with PAE enabled.
makeoptions WITHOUT_MODULES+="ncr pst"
options SCHED_ULE # ULE scheduler
options PREEMPTION # Enable kernel thread preemption
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options SCTP # Stream Control Transmission Protocol
options FFS # Berkeley Fast Filesystem
options SOFTUPDATES # Enable FFS soft updates support
options UFS_ACL # Support for access control lists
options UFS_DIRHASH # Improve performance on big directories
options UFS_GJOURNAL # Enable gjournal-based UFS journaling
options NFSCL # Network Filesystem Client
options NFSD # Network Filesystem Server
options NFSLOCKD # Network Lock Manager
options NFS_ROOT # NFS usable as /, requires NFSCL
options MSDOSFS # MSDOS Filesystem
options CD9660 # ISO 9660 Filesystem
options PROCFS # Process filesystem (requires PSEUDOFS)
options PSEUDOFS # Pseudo-filesystem framework
options GEOM_PART_GPT # GUID Partition Tables.
options GEOM_LABEL # Provides labelization
options COMPAT_FREEBSD4 # Compatible with FreeBSD4
options COMPAT_FREEBSD5 # Compatible with FreeBSD5
options COMPAT_FREEBSD6 # Compatible with FreeBSD6
options COMPAT_FREEBSD7 # Compatible with FreeBSD7
options COMPAT_FREEBSD9 # Compatible with FreeBSD9
options COMPAT_FREEBSD10 # Compatible with FreeBSD10
options KTRACE # ktrace(1) support
options STACK # stack(9) support
options SYSVSHM # SYSV-style shared memory
options SYSVMSG # SYSV-style message queues
options SYSVSEM # SYSV-style semaphores
options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
options KBD_INSTALL_CDEV # install a CDEV entry in /dev
options AUDIT # Security event auditing
# Debugging for use in -current
options KDB # Enable kernel debugger support.
options DDB # Support DDB.
options GDB # Support remote GDB.
options DEADLKRES # Enable the deadlock resolver
options INVARIANTS # Enable calls of extra sanity checking
options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS
options WITNESS # Enable checks to detect deadlocks and cycles
options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
options PAE
nooption NATIVE
option XEN
nodevice atpic
nodevice isa
options MCLSHIFT=12
# To make an SMP kernel, the next two lines are needed
options SMP # Symmetric MultiProcessor Kernel
device apic # I/O APIC
#device atkbdc # AT keyboard controller
#device atkbd # AT keyboard
device psm # PS/2 mouse
device pci
#device kbdmux # keyboard multiplexer
# Pseudo devices.
device loop # Network loopback
device random # Entropy device
device ether # Ethernet support
device tun # Packet tunnel.
device md # Memory "disks"
device gif # IPv6 and IPv4 tunneling
# Wireless cards
options IEEE80211_SUPPORT_MESH
options AH_SUPPORT_AR5416
# The `bpf' device enables the Berkeley Packet Filter.
# Be aware of the administrative consequences of enabling this!
# Note that 'bpf' is required for DHCP.
device bpf # Berkeley packet filter

View File

@ -266,7 +266,6 @@ IDTVEC(invlcache)
/*
* Handler for IPIs sent via the per-cpu IPI bitmap.
*/
#ifndef XEN
.text
SUPERALIGN_TEXT
IDTVEC(ipi_intr_bitmap_handler)
@ -281,7 +280,7 @@ IDTVEC(ipi_intr_bitmap_handler)
call ipi_bitmap_handler
MEXITCOUNT
jmp doreti
#endif
/*
* Executed by a CPU when it receives an IPI_STOP from another CPU.
*/
@ -301,7 +300,6 @@ IDTVEC(cpustop)
/*
* Executed by a CPU when it receives an IPI_SUSPEND from another CPU.
*/
#ifndef XEN
.text
SUPERALIGN_TEXT
IDTVEC(cpususpend)
@ -314,7 +312,6 @@ IDTVEC(cpususpend)
POP_FRAME
jmp doreti_iret
#endif
/*
* Executed by a CPU when it receives a RENDEZVOUS IPI from another CPU.

View File

@ -238,11 +238,6 @@ ASSYM(BUS_SPACE_HANDLE_BASE, offsetof(struct bus_space_handle, bsh_base));
ASSYM(BUS_SPACE_HANDLE_IAT, offsetof(struct bus_space_handle, bsh_iat));
#endif
#ifdef XEN
ASSYM(PC_CR3, offsetof(struct pcpu, pc_cr3));
ASSYM(XEN_HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_START);
#endif
#ifdef HWPMC_HOOKS
ASSYM(PMC_FN_USER_CALLCHAIN, PMC_FN_USER_CALLCHAIN);
#endif

View File

@ -160,24 +160,6 @@ int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif
#ifdef XEN
/* XEN includes */
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>
void Xhypervisor_callback(void);
void failsafe_callback(void);
extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
int running_xen = 1;
extern unsigned long physfree;
#endif /* XEN */
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
@ -356,9 +338,7 @@ cpu_startup(dummy)
*/
bufinit();
vm_pager_bufferinit();
#ifndef XEN
cpu_setregs();
#endif
}
/*
@ -1291,13 +1271,8 @@ SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
int _default_ldt;
#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
union descriptor ldt[NLDT]; /* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt; /* table descriptors */
@ -1397,7 +1372,6 @@ struct soft_segment_descriptor gdt_segs[] = {
.ssd_xx = 0, .ssd_xx1 = 0,
.ssd_def32 = 1,
.ssd_gran = 1 },
#ifndef XEN
/* GPROC0_SEL 9 Proc 0 Tss Descriptor */
{
.ssd_base = 0x0,
@ -1489,7 +1463,6 @@ struct soft_segment_descriptor gdt_segs[] = {
.ssd_xx = 0, .ssd_xx1 = 0,
.ssd_def32 = 0,
.ssd_gran = 0 },
#endif /* !XEN */
};
static struct soft_segment_descriptor ldt_segs[] = {
@ -1641,7 +1614,7 @@ sdtossd(sd, ssd)
ssd->ssd_gran = sd->sd_gran;
}
#if !defined(PC98) && !defined(XEN)
#if !defined(PC98)
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
int *physmap_idxp)
@ -1748,9 +1721,8 @@ add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
if (!add_smap_entry(smap, physmap, physmap_idxp))
break;
}
#endif /* !PC98 && !XEN */
#endif /* !PC98 */
#ifndef XEN
static void
basemem_setup(void)
{
@ -1798,7 +1770,6 @@ basemem_setup(void)
for (i = basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}
#endif /* !XEN */
/*
* Populate the (physmap) array with base/bound pairs describing the
@ -2074,8 +2045,6 @@ getmemsize(int first)
for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
off);
PT_UPDATES_FLUSH();
}
#else /* PC98 */
static void
@ -2086,7 +2055,6 @@ getmemsize(int first)
vm_paddr_t physmap[PHYSMAP_SIZE];
pt_entry_t *pte;
quad_t dcons_addr, dcons_size, physmem_tunable;
#ifndef XEN
int hasbrokenint12, i, res;
u_int extmem;
struct vm86frame vmf;
@ -2094,17 +2062,8 @@ getmemsize(int first)
vm_paddr_t pa;
struct bios_smap *smap, *smapbase;
caddr_t kmdp;
#endif
has_smap = 0;
#if defined(XEN)
Maxmem = xen_start_info->nr_pages - init_first;
physmem = Maxmem;
basemem = 0;
physmap[0] = init_first << PAGE_SHIFT;
physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
physmap_idx = 0;
#else
#ifdef XBOX
if (arch_i386_is_xbox) {
/*
@ -2247,7 +2206,6 @@ getmemsize(int first)
physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
physmap_done:
#endif
/*
* Now, physmap contains a map of physical memory.
*/
@ -2321,7 +2279,6 @@ getmemsize(int first)
getenv_quad("dcons.size", &dcons_size) == 0)
dcons_addr = 0;
#ifndef XEN
/*
* physmap is in bytes, so when converting to page boundaries,
* round up the start address and round down the end address.
@ -2442,13 +2399,6 @@ getmemsize(int first)
}
*pte = 0;
invltlb();
#else
phys_avail[0] = physfree;
phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
dump_avail[0] = 0;
dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
#endif
/*
* XXX
@ -2472,272 +2422,9 @@ getmemsize(int first)
for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
off);
PT_UPDATES_FLUSH();
}
#endif /* PC98 */
#ifdef XEN
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))
register_t
init386(first)
int first;
{
unsigned long gdtmachpfn;
int error, gsel_tss, metadata_missing, x, pa;
struct pcpu *pc;
#ifdef CPU_ENABLE_SSE
struct xstate_hdr *xhdr;
#endif
struct callback_register event = {
.type = CALLBACKTYPE_event,
.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
};
struct callback_register failsafe = {
.type = CALLBACKTYPE_failsafe,
.address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
};
thread0.td_kstack = proc0kstack;
thread0.td_kstack_pages = KSTACK_PAGES;
/*
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup0(&proc0, &thread0);
metadata_missing = 0;
if (xen_start_info->mod_start) {
preload_metadata = (caddr_t)xen_start_info->mod_start;
preload_bootstrap_relocate(KERNBASE);
} else {
metadata_missing = 1;
}
if (envmode == 1)
kern_envp = static_env;
else if ((caddr_t)xen_start_info->cmd_line)
kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
boothowto |= xen_boothowto(kern_envp);
/* Init basic tunables, hz etc */
init_param1();
/*
* XEN occupies a portion of the upper virtual address space
* At its base it manages an array mapping machine page frames
* to physical page frames - hence we need to be able to
* access 4GB - (64MB - 4MB + 64k)
*/
gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
pc = &__pcpu[0];
gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
bzero(gdt, PAGE_SIZE);
for (x = 0; x < NGDT; x++)
ssdtosd(&gdt_segs[x], &gdt[x].sd);
mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
lgdt(&r_gdt);
gdtset = 1;
if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
panic("set_trap_table failed - error %d\n", error);
}
error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
if (error == 0)
error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if CONFIG_XEN_COMPAT <= 0x030002
if (error == -ENOXENSYS)
HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
(unsigned long)Xhypervisor_callback,
GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
#endif
pcpu_init(pc, 0, sizeof(struct pcpu));
for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
pmap_kenter(pa + KERNBASE, pa);
dpcpu_init((void *)(first + KERNBASE), 0);
first += DPCPU_SIZE;
physfree += DPCPU_SIZE;
init_first += DPCPU_SIZE / PAGE_SIZE;
PCPU_SET(prvspace, pc);
PCPU_SET(curthread, &thread0);
/*
* Initialize mutexes.
*
* icu_lock: in order to allow an interrupt to occur in a critical
* section, to set pcpu->ipending (etc...) properly, we
* must be able to get the icu lock, so it can't be
* under witness.
*/
mutex_init();
mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
/* make ldt memory segments */
PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
bzero(ldt, PAGE_SIZE);
ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
ssdtosd(&ldt_segs[x], &ldt[x].sd);
default_proc_ldt.ldt_base = (caddr_t)ldt;
default_proc_ldt.ldt_len = 6;
_default_ldt = (int)&default_proc_ldt;
PCPU_SET(currentldt, _default_ldt);
PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
#if defined(XEN_PRIVILEGED)
/*
* Initialize the i8254 before the console so that console
* initialization can use DELAY().
*/
i8254_init();
#endif
/*
* Initialize the console before we print anything out.
*/
cninit();
if (metadata_missing)
printf("WARNING: loader(8) metadata is missing!\n");
#ifdef DEV_ISA
#ifdef DEV_ATPIC
elcr_probe();
atpic_startup();
#else
/* Reset and mask the atpics and leave them shut down. */
atpic_reset();
/*
* Point the ICU spurious interrupt vectors at the APIC spurious
* interrupt handler.
*/
setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
#endif
#endif
#ifdef DDB
db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
#endif
kdb_init();
#ifdef KDB
if (boothowto & RB_KDB)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
finishidentcpu(); /* Final stage of CPU initialization */
setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
initializecpucache();
/* pointer to selector slot for %fs/%gs */
PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#if defined(PAE) || defined(PAE_TABLES)
dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
dblfault_tss.tss_eip = (int)dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es =
dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
vm86_initialize();
getmemsize(first);
init_param2(physmem);
/* now running on new page tables, configured, and u/iom is accessible */
msgbufinit(msgbufp, msgbufsize);
#ifdef DEV_NPX
npxinit(true);
#endif
/*
* Set up thread0 pcb after npxinit calculated pcb + fpu save
* area size. Zero out the extended state header in fpu save
* area.
*/
thread0.td_pcb = get_pcb_td(&thread0);
bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
#ifdef CPU_ENABLE_SSE
if (use_xsave) {
xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
1);
xhdr->xstate_bv = xsave_mask;
}
#endif
PCPU_SET(curpcb, thread0.td_pcb);
/* make an initial tss so cpu can get interrupt stack on syscall! */
/* Note: -16 is so we can grow the trapframe if we came from vm86 */
PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
PCPU_GET(common_tss.tss_esp0));
/* transfer to user mode */
_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
/* setup proc 0's pcb */
thread0.td_pcb->pcb_flags = 0;
#if defined(PAE) || defined(PAE_TABLES)
thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
#else
thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
#endif
thread0.td_pcb->pcb_ext = 0;
thread0.td_frame = &proc0_tf;
thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
cpu_probe_amdc1e();
/* Location of kernel stack for locore */
return ((register_t)thread0.td_pcb);
}
#else
register_t
init386(first)
int first;
@ -3061,7 +2748,6 @@ init386(first)
/* Location of kernel stack for locore */
return ((register_t)thread0.td_pcb);
}
#endif
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)

View File

@ -68,10 +68,6 @@ static void *dump_va;
static uint64_t counter, progress;
CTASSERT(sizeof(*vm_page_dump) == 4);
#ifndef XEN
#define xpmap_mtop(x) (x)
#define xpmap_ptom(x) (x)
#endif
static int
@ -205,7 +201,7 @@ minidumpsys(struct dumperinfo *di)
j = va >> PDRSHIFT;
if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
/* This is an entire 2M page. */
pa = xpmap_mtop(pd[j] & PG_PS_FRAME);
pa = pd[j] & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
if (is_dumpable(pa))
dump_add_page(pa);
@ -215,10 +211,10 @@ minidumpsys(struct dumperinfo *di)
}
if ((pd[j] & PG_V) == PG_V) {
/* set bit for each valid page in this 2MB block */
pt = pmap_kenter_temporary(xpmap_mtop(pd[j] & PG_FRAME), 0);
pt = pmap_kenter_temporary(pd[j] & PG_FRAME, 0);
for (k = 0; k < NPTEPG; k++) {
if ((pt[k] & PG_V) == PG_V) {
pa = xpmap_mtop(pt[k] & PG_FRAME);
pa = pt[k] & PG_FRAME;
if (is_dumpable(pa))
dump_add_page(pa);
}
@ -318,24 +314,8 @@ minidumpsys(struct dumperinfo *di)
continue;
}
if ((pd[j] & PG_V) == PG_V) {
pa = xpmap_mtop(pd[j] & PG_FRAME);
#ifndef XEN
pa = pd[j] & PG_FRAME;
error = blk_write(di, 0, pa, PAGE_SIZE);
#else
pt = pmap_kenter_temporary(pa, 0);
memcpy(fakept, pt, PAGE_SIZE);
for (i = 0; i < NPTEPG; i++)
fakept[i] = xpmap_mtop(fakept[i]);
error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
if (error)
goto fail;
/* flush, in case we reuse fakept in the same block */
error = blk_flush(di);
if (error)
goto fail;
bzero(fakept, sizeof(fakept));
#endif
if (error)
goto fail;
} else {

View File

@ -695,11 +695,9 @@ END(bcmp)
*/
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
#ifndef XEN
/* reload the descriptor table */
movl 4(%esp),%eax
lgdt (%eax)
#endif
/* flush the prefetch q */
jmp 1f

View File

@ -88,7 +88,7 @@ ENTRY(cpu_throw)
movl 8(%esp),%ecx /* New thread */
movl TD_PCB(%ecx),%edx
movl PCB_CR3(%edx),%eax
LOAD_CR3(%eax)
movl %eax,%cr3
/* set bit in new pm_active */
movl TD_PROC(%ecx),%eax
movl P_VMSPACE(%eax), %ebx
@ -174,10 +174,10 @@ ENTRY(cpu_switch)
/* switch address space */
movl PCB_CR3(%edx),%eax
READ_CR3(%ebx) /* The same address space? */
movl %cr3,%ebx /* The same address space? */
cmpl %ebx,%eax
je sw0
LOAD_CR3(%eax) /* new address space */
movl %eax,%cr3 /* new address space */
movl %esi,%eax
movl PCPU(CPUID),%esi
SETOP %eax,TD_LOCK(%edi) /* Switchout td_lock */
@ -204,18 +204,6 @@ sw0:
SETOP %esi,TD_LOCK(%edi) /* Switchout td_lock */
sw1:
BLOCK_SPIN(%ecx)
#ifdef XEN
pushl %eax
pushl %ecx
pushl %edx
call xen_handle_thread_switch
popl %edx
popl %ecx
popl %eax
/*
* XXX set IOPL
*/
#else
/*
* At this point, we've switched address spaces and are ready
* to load up the rest of the next context.
@ -264,7 +252,7 @@ sw1:
movl 12(%esi), %ebx
movl %eax, 8(%edi)
movl %ebx, 12(%edi)
#endif
/* Restore context. */
movl PCB_EBX(%edx),%ebx
movl PCB_ESP(%edx),%esp
@ -290,7 +278,7 @@ sw1:
movl _default_ldt,%eax
cmpl PCPU(CURRENTLDT),%eax
je 2f
LLDT(_default_ldt)
lldt _default_ldt
movl %eax,PCPU(CURRENTLDT)
jmp 2f
1:

View File

@ -59,20 +59,6 @@ __FBSDID("$FreeBSD$");
#include <security/audit/audit.h>
#ifdef XEN
#include <machine/xen/xenfunc.h>
void i386_reset_ldt(struct proc_ldt *pldt);
void
i386_reset_ldt(struct proc_ldt *pldt)
{
xen_set_ldt((vm_offset_t)pldt->ldt_base, pldt->ldt_len);
}
#else
#define i386_reset_ldt(x)
#endif
#include <vm/vm_kern.h> /* for kernel_map */
#define MAX_LD 8192
@ -211,12 +197,7 @@ sysarch(td, uap)
*/
sd.sd_lobase = base & 0xffffff;
sd.sd_hibase = (base >> 24) & 0xff;
#ifdef XEN
/* need to do nosegneg like Linux */
sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
sd.sd_hilimit = 0xf;
sd.sd_type = SDT_MEMRWA;
sd.sd_dpl = SEL_UPL;
@ -226,12 +207,7 @@ sysarch(td, uap)
sd.sd_gran = 1;
critical_enter();
td->td_pcb->pcb_fsd = sd;
#ifdef XEN
HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[0]),
*(uint64_t *)&sd);
#else
PCPU_GET(fsgs_gdt)[0] = sd;
#endif
critical_exit();
td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
}
@ -252,12 +228,7 @@ sysarch(td, uap)
sd.sd_lobase = base & 0xffffff;
sd.sd_hibase = (base >> 24) & 0xff;
#ifdef XEN
/* need to do nosegneg like Linux */
sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
sd.sd_hilimit = 0xf;
sd.sd_type = SDT_MEMRWA;
sd.sd_dpl = SEL_UPL;
@ -267,12 +238,7 @@ sysarch(td, uap)
sd.sd_gran = 1;
critical_enter();
td->td_pcb->pcb_gsd = sd;
#ifdef XEN
HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[1]),
*(uint64_t *)&sd);
#else
PCPU_GET(fsgs_gdt)[1] = sd;
#endif
critical_exit();
load_gs(GSEL(GUGS_SEL, SEL_UPL));
}
@ -434,10 +400,6 @@ set_user_ldt(struct mdproc *mdp)
}
pldt = mdp->md_ldt;
#ifdef XEN
i386_reset_ldt(pldt);
PCPU_SET(currentldt, (int)pldt);
#else
#ifdef SMP
gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
@ -445,7 +407,6 @@ set_user_ldt(struct mdproc *mdp)
#endif
lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
#endif /* XEN */
if (dtlocked)
mtx_unlock_spin(&dt_lock);
}
@ -464,43 +425,6 @@ set_user_ldt_rv(struct vmspace *vmsp)
}
#endif
#ifdef XEN
/*
* dt_lock must be held. Returns with dt_lock held.
*/
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
struct proc_ldt *pldt, *new_ldt;
mtx_assert(&dt_lock, MA_OWNED);
mtx_unlock_spin(&dt_lock);
new_ldt = malloc(sizeof(struct proc_ldt),
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
round_page(len * sizeof(union descriptor)), M_WAITOK);
new_ldt->ldt_refcnt = 1;
new_ldt->ldt_active = 0;
mtx_lock_spin(&dt_lock);
if ((pldt = mdp->md_ldt)) {
if (len > pldt->ldt_len)
len = pldt->ldt_len;
bcopy(pldt->ldt_base, new_ldt->ldt_base,
len * sizeof(union descriptor));
} else {
bcopy(ldt, new_ldt->ldt_base, PAGE_SIZE);
}
mtx_unlock_spin(&dt_lock); /* XXX kill once pmap locking fixed. */
pmap_map_readonly(kernel_pmap, (vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len*sizeof(union descriptor));
mtx_lock_spin(&dt_lock); /* XXX kill once pmap locking fixed. */
return (new_ldt);
}
#else
/*
* dt_lock must be held. Returns with dt_lock held.
*/
@ -535,7 +459,6 @@ user_ldt_alloc(struct mdproc *mdp, int len)
return (new_ldt);
}
#endif /* !XEN */
/*
* Must be called with dt_lock held. Returns with dt_lock unheld.
@ -553,13 +476,8 @@ user_ldt_free(struct thread *td)
}
if (td == curthread) {
#ifdef XEN
i386_reset_ldt(&default_proc_ldt);
PCPU_SET(currentldt, (int)&default_proc_ldt);
#else
lldt(_default_ldt);
PCPU_SET(currentldt, _default_ldt);
#endif
}
mdp->md_ldt = NULL;
@ -785,27 +703,7 @@ i386_set_ldt(td, uap, descs)
td->td_retval[0] = uap->start;
return (error);
}
#ifdef XEN
static int
i386_set_ldt_data(struct thread *td, int start, int num,
union descriptor *descs)
{
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt = mdp->md_ldt;
mtx_assert(&dt_lock, MA_OWNED);
while (num) {
xen_update_descriptor(
&((union descriptor *)(pldt->ldt_base))[start],
descs);
num--;
start++;
descs++;
}
return (0);
}
#else
static int
i386_set_ldt_data(struct thread *td, int start, int num,
union descriptor *descs)
@ -821,7 +719,6 @@ i386_set_ldt_data(struct thread *td, int start, int num,
num * sizeof(union descriptor));
return (0);
}
#endif /* !XEN */
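/*
 * Why the deleted PV variant needed a hypercall: under PV the LDT pages
 * are mapped read-only and every descriptor write is validated by the
 * hypervisor, so each slot update went through
 * HYPERVISOR_update_descriptor() rather than a plain store. Roughly:
 */
static void
pv_set_descriptor(union descriptor *slot, union descriptor *newd)
{
	/* vtomach() translates the slot's kernel VA to a machine address */
	HYPERVISOR_update_descriptor(vtomach(slot), *(uint64_t *)newd);
}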
static int
i386_ldt_grow(struct thread *td, int len)

View File

@ -89,9 +89,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#ifdef XEN
#include <xen/hypervisor.h>
#endif
#ifdef PC98
#include <pc98/cbus/cbus.h>
#else
@ -304,10 +301,8 @@ cpu_fork(td1, p2, td2, flags)
/* Setup to release spin count in fork_exit(). */
td2->td_md.md_spinlock_count = 1;
/*
* XXX XEN need to check on PSL_USER is handled
*/
td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
/*
* Now, cpu_switch() can schedule the new process.
* pcb_esp is loaded pointing to the cpu_switch() stack frame
@ -698,12 +693,6 @@ cpu_reset_real()
#endif
disable_intr();
#ifdef XEN
if (smp_processor_id() == 0)
HYPERVISOR_shutdown(SHUTDOWN_reboot);
else
HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
#ifdef CPU_ELAN
if (elan_mmcr != NULL)
elan_mmcr->RESCFG = 1;
@ -797,13 +786,8 @@ sf_buf_map(struct sf_buf *sf, int flags)
*/
ptep = vtopte(sf->kva);
opte = *ptep;
#ifdef XEN
PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(sf->m)) | pgeflag
| PG_RW | PG_V | pmap_cache_bits(sf->m->md.pat_mode, 0));
#else
*ptep = VM_PAGE_TO_PHYS(sf->m) | pgeflag | PG_RW | PG_V |
pmap_cache_bits(sf->m->md.pat_mode, 0);
#endif
/*
* Avoid unnecessary TLB invalidations: If the sf_buf's old
@ -854,15 +838,8 @@ sf_buf_shootdown(struct sf_buf *sf, int flags)
int
sf_buf_unmap(struct sf_buf *sf)
{
#ifdef XEN
/*
* Xen doesn't like having dangling R/W mappings
*/
pmap_qremove(sf->kva, 1);
return (1);
#else
return (0);
#endif
}
static void

View File

@ -176,37 +176,6 @@
movl $KPSEL, %eax ; /* reload with per-CPU data segment */ \
movl %eax, %fs
#ifdef XEN
#define LOAD_CR3(reg) \
movl reg,PCPU(CR3); \
pushl %ecx ; \
pushl %edx ; \
pushl %esi ; \
pushl reg ; \
call xen_load_cr3 ; \
addl $4,%esp ; \
popl %esi ; \
popl %edx ; \
popl %ecx ; \
#define READ_CR3(reg) movl PCPU(CR3),reg;
#define LLDT(arg) \
pushl %edx ; \
pushl %eax ; \
xorl %eax,%eax ; \
movl %eax,%gs ; \
call i386_reset_ldt ; \
popl %eax ; \
popl %edx
#define CLI call ni_cli
#else
#define LOAD_CR3(reg) movl reg,%cr3;
#define READ_CR3(reg) movl %cr3,reg;
#define LLDT(arg) lldt arg;
#define CLI cli
#endif /* !XEN */
#endif /* LOCORE */
#ifdef __STDC__

View File

@ -42,17 +42,6 @@
#error this file needs sys/cdefs.h as a prerequisite
#endif
#ifdef XEN
extern void xen_cli(void);
extern void xen_sti(void);
extern u_int xen_rcr2(void);
extern void xen_load_cr3(u_int data);
extern void xen_tlb_flush(void);
extern void xen_invlpg(u_int addr);
extern void write_eflags(u_int eflags);
extern u_int read_eflags(void);
#endif
struct region_descriptor;
#define readb(va) (*(volatile uint8_t *) (va))
@ -106,11 +95,8 @@ clts(void)
static __inline void
disable_intr(void)
{
#ifdef XEN
xen_cli();
#else
__asm __volatile("cli" : : : "memory");
#endif
}
static __inline void
@ -132,11 +118,8 @@ cpuid_count(u_int ax, u_int cx, u_int *p)
static __inline void
enable_intr(void)
{
#ifdef XEN
xen_sti();
#else
__asm __volatile("sti");
#endif
}
static __inline void
@ -325,11 +308,7 @@ ia32_pause(void)
}
static __inline u_int
#ifdef XEN
_read_eflags(void)
#else
read_eflags(void)
#endif
{
u_int ef;
@ -389,11 +368,7 @@ wbinvd(void)
}
static __inline void
#ifdef XEN
_write_eflags(u_int ef)
#else
write_eflags(u_int ef)
#endif
{
__asm __volatile("pushl %0; popfl" : : "r" (ef));
}
@ -425,9 +400,6 @@ rcr2(void)
{
u_int data;
#ifdef XEN
return (xen_rcr2());
#endif
__asm __volatile("movl %%cr2,%0" : "=r" (data));
return (data);
}
@ -435,11 +407,8 @@ rcr2(void)
static __inline void
load_cr3(u_int data)
{
#ifdef XEN
xen_load_cr3(data);
#else
__asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
#endif
}
static __inline u_int
@ -491,11 +460,8 @@ load_xcr(u_int reg, uint64_t val)
static __inline void
invltlb(void)
{
#ifdef XEN
xen_tlb_flush();
#else
load_cr3(rcr3());
#endif
}
/*
@ -506,11 +472,7 @@ static __inline void
invlpg(u_int addr)
{
#ifdef XEN
xen_invlpg(addr);
#else
__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
#endif
}
static __inline u_short

View File

@ -58,13 +58,7 @@
(FIRST_MSI_INT + NUM_MSI_INTS)
#define LAST_EVTCHN_INT \
(FIRST_EVTCHN_INT + NUM_EVTCHN_INTS - 1)
#elif defined(XEN)
#include <xen/xen-os.h>
#define NUM_EVTCHN_INTS NR_EVENT_CHANNELS
#define FIRST_EVTCHN_INT 0
#define LAST_EVTCHN_INT \
(FIRST_EVTCHN_INT + NUM_EVTCHN_INTS - 1)
#else /* !XEN && !XENHVM */
#else /* !XENHVM */
#define NUM_EVTCHN_INTS 0
#endif
#define NUM_IO_INTS (FIRST_MSI_INT + NUM_MSI_INTS + NUM_EVTCHN_INTS)

View File

@ -44,34 +44,6 @@
* other processors"
*/
#if defined(XEN)
/* These are periodically updated in shared_info, and then copied here. */
struct shadow_time_info {
uint64_t tsc_timestamp; /* TSC at last update of time vals. */
uint64_t system_timestamp; /* Time, in nanosecs, since boot. */
uint32_t tsc_to_nsec_mul;
uint32_t tsc_to_usec_mul;
int tsc_shift;
uint32_t version;
};
#define PCPU_XEN_FIELDS \
; \
u_int pc_cr3; /* track cr3 for R1/R3*/ \
vm_paddr_t *pc_pdir_shadow; \
uint64_t pc_processed_system_time; \
struct shadow_time_info pc_shadow_time; \
char __pad[185]
#else /* !XEN */
#define PCPU_XEN_FIELDS \
; \
char __pad[233]
#endif
#define PCPU_MD_FIELDS \
char pc_monitorbuf[128] __aligned(128); /* cache line */ \
struct pcpu *pc_prvspace; /* Self-reference */ \
@ -85,8 +57,8 @@ struct shadow_time_info {
u_int pc_apic_id; \
int pc_private_tss; /* Flag indicating private tss*/\
u_int pc_cmci_mask; /* MCx banks for CMCI */ \
u_int pc_vcpu_id /* Xen vCPU ID */ \
PCPU_XEN_FIELDS
u_int pc_vcpu_id; /* Xen vCPU ID */ \
char __pad[233]
#ifdef _KERNEL

View File

@ -219,76 +219,6 @@ extern pd_entry_t *IdlePTD; /* physical address of "Idle" state directory */
*/
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
#if defined(XEN)
#include <sys/param.h>
#include <xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenpmap.h>
extern pt_entry_t pg_nx;
#define PG_KERNEL (PG_V | PG_A | PG_RW | PG_M)
#define MACH_TO_VM_PAGE(ma) PHYS_TO_VM_PAGE(xpmap_mtop((ma)))
#define VM_PAGE_TO_MACH(m) xpmap_ptom(VM_PAGE_TO_PHYS((m)))
#define VTOM(va) xpmap_ptom(VTOP(va))
static __inline vm_paddr_t
pmap_kextract_ma(vm_offset_t va)
{
vm_paddr_t ma;
if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) {
ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1));
} else {
ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK);
}
return ma;
}
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
return xpmap_mtop(pmap_kextract_ma(va));
}
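/*
 * Editor's note (not in the original header): under PV the page tables
 * hold machine addresses, so pmap_kextract_ma() walks the PTD/PTE
 * directly -- a PG_PS entry maps an NBPDR-sized superpage, hence the
 * (va & (NBPDR - 1)) offset -- and pmap_kextract() converts the result
 * back to a pseudo-physical address with xpmap_mtop().
 */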
#define vtomach(va) pmap_kextract_ma(((vm_offset_t) (va)))
vm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va);
void pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa);
void pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
void pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);
static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
pt_entry_t r;
r = *ptep;
PT_SET_VA(ptep, v, TRUE);
return (r);
}
static __inline pt_entry_t
pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
{
pt_entry_t r;
r = *ptep;
PT_SET_VA_MA(ptep, v, TRUE);
return (r);
}
#define pte_load_clear(ptep) pte_load_store((ptep), (pt_entry_t)0ULL)
#define pte_store(ptep, pte) pte_load_store((ptep), (pt_entry_t)pte)
#define pte_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte)
#define pde_store_ma(ptep, pte) pte_load_store_ma((ptep), (pt_entry_t)pte)
#elif !defined(XEN)
/*
* KPTmap is a linear mapping of the kernel page table. It differs from the
* recursive mapping in two ways: (1) it only provides access to kernel page
@ -328,13 +258,8 @@ pmap_kextract(vm_offset_t va)
}
return (pa);
}
#endif
#if !defined(XEN)
#define PT_UPDATES_FLUSH()
#endif
#if (defined(PAE) || defined(PAE_TABLES)) && !defined(XEN)
#if (defined(PAE) || defined(PAE_TABLES))
#define pde_cmpset(pdep, old, new) atomic_cmpset_64_i586(pdep, old, new)
#define pte_load_store(ptep, pte) atomic_swap_64_i586(ptep, pte)
@ -343,7 +268,7 @@ pmap_kextract(vm_offset_t va)
extern pt_entry_t pg_nx;
#elif !defined(PAE) && !defined(PAE_TABLES) && !defined(XEN)
#else /* !(PAE || PAE_TABLES) */
#define pde_cmpset(pdep, old, new) atomic_cmpset_int(pdep, old, new)
#define pte_load_store(ptep, pte) atomic_swap_int(ptep, pte)
@ -352,7 +277,7 @@ extern pt_entry_t pg_nx;
*(u_int *)(ptep) = (u_int)(pte); \
} while (0)
#endif /* PAE */
#endif /* !(PAE || PAE_TABLES) */
#define pte_clear(ptep) pte_store(ptep, 0)

View File

@ -82,14 +82,8 @@ struct region_descriptor {
#ifdef _KERNEL
extern int _default_ldt;
#ifdef XEN
extern struct proc_ldt default_proc_ldt;
extern union descriptor *gdt;
extern union descriptor *ldt;
#else
extern union descriptor gdt[];
extern union descriptor ldt[NLDT];
#endif
extern struct soft_segment_descriptor gdt_segs[];
extern struct gate_descriptor *idt;
extern struct region_descriptor r_gdt, r_idt;

View File

@ -90,9 +90,7 @@ inthand_t
void assign_cpu_ids(void);
void cpu_add(u_int apic_id, char boot_cpu);
void cpustop_handler(void);
#ifndef XEN
void cpususpend_handler(void);
#endif
void init_secondary_tail(void);
void invltlb_handler(void);
void invlpg_handler(void);
@ -101,9 +99,7 @@ void invlcache_handler(void);
void init_secondary(void);
void ipi_startup(int apic_id, int vector);
void ipi_all_but_self(u_int ipi);
#ifndef XEN
void ipi_bitmap_handler(struct trapframe frame);
#endif
void ipi_cpu(int cpu, u_int ipi);
int ipi_nmi_handler(void);
void ipi_selected(cpuset_t cpus, u_int ipi);
@ -121,9 +117,6 @@ void mem_range_AP_init(void);
void topo_probe(void);
void ipi_send_cpu(int cpu, u_int ipi);
#ifdef XEN
void ipi_to_irq_init(void);
#endif
#endif /* !LOCORE */
#endif /* SMP */

View File

@ -135,11 +135,7 @@
* Kernel physical load address.
*/
#ifndef KERNLOAD
#if defined(XEN) && !defined(XEN_PRIVILEGED_GUEST)
#define KERNLOAD 0
#else
#define KERNLOAD (1 << PDRSHIFT)
#endif
#endif /* !defined(KERNLOAD) */
/*
@ -149,11 +145,7 @@
* messy at times, but hey, we'll do anything to save a page :-)
*/
#ifdef XEN
#define VM_MAX_KERNEL_ADDRESS HYPERVISOR_VIRT_START
#else
#define VM_MAX_KERNEL_ADDRESS VADDR(KPTDI+NKPDE-1, NPTEPG-1)
#endif
#define VM_MIN_KERNEL_ADDRESS VADDR(PTDPTDI, PTDPTDI)

View File

@ -1,22 +0,0 @@
/******************************************************************************
* features.h
*
* Query the features reported by Xen.
*
* Copyright (c) 2006, Ian Campbell
*
* $FreeBSD$
*/
#ifndef __ASM_XEN_FEATURES_H__
#define __ASM_XEN_FEATURES_H__
#include <xen/interface/version.h>
extern void setup_xen_features(void);
extern uint8_t xen_features[XENFEAT_NR_SUBMAPS * 32];
#define xen_feature(flag) (xen_features[flag])
#endif /* __ASM_XEN_FEATURES_H__ */

View File

@ -246,14 +246,8 @@ HYPERVISOR_memory_op(
return _hypercall2(int, memory_op, cmd, arg);
}
#if defined(XEN)
int HYPERVISOR_multicall(multicall_entry_t *, int);
static inline int
_HYPERVISOR_multicall(
#else /* XENHVM */
static inline int
HYPERVISOR_multicall(
#endif
void *call_list, int nr_calls)
{
return _hypercall2(int, multicall, call_list, nr_calls);

View File

@ -44,105 +44,6 @@ static inline void rep_nop(void)
}
#define cpu_relax() rep_nop()
#ifndef XENHVM
#ifdef SMP
extern int gdtset;
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
if (__predict_true(gdtset))
return PCPU_GET(cpuid);
return 0;
}
#else
#define smp_processor_id() 0
#endif
#ifndef PANIC_IF
#define PANIC_IF(exp) if (__predict_false(exp)) {printf("panic - %s: %s:%d\n",#exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__);}
#endif
/*
* Crude memory allocator for memory allocation early in boot.
*/
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
/*
* STI/CLI equivalents. These basically set and clear the virtual
* event_enable flag in the shared_info structure. Note that when
* the enable bit is set, there may be pending events to be handled.
* We may therefore call into do_hypervisor_callback() directly.
*/
#define __cli() \
do { \
vcpu_info_t *_vcpu; \
_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 1; \
barrier(); \
} while (0)
#define __sti() \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
_vcpu->evtchn_upcall_mask = 0; \
barrier(); /* unmask then check (avoid races) */ \
if (__predict_false(_vcpu->evtchn_upcall_pending)) \
force_evtchn_callback(); \
} while (0)
#define __restore_flags(x) \
do { \
vcpu_info_t *_vcpu; \
barrier(); \
_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
barrier(); /* unmask then check (avoid races) */ \
if (__predict_false(_vcpu->evtchn_upcall_pending)) \
force_evtchn_callback(); \
} \
} while (0)
/*
* Add critical_{enter, exit}?
*
*/
#define __save_and_cli(x) \
do { \
vcpu_info_t *_vcpu; \
_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
(x) = _vcpu->evtchn_upcall_mask; \
_vcpu->evtchn_upcall_mask = 1; \
barrier(); \
} while (0)
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
#define mtx_lock_irqsave(lock, x) {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
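/*
 * Editor's sketch (not part of the original header) of how the wrappers
 * above pair up in practice; the function name and the local 'flags'
 * variable are hypothetical.
 */
static inline void
example_critical_section(void)
{
	unsigned int flags;

	local_irq_save(flags);		/* mask upcalls, remember old mask */
	/* ... touch state shared with the event-channel upcall ... */
	local_irq_restore(flags);	/* unmask; may force_evtchn_callback() */
}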
#endif /* !XENHVM */
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

View File

@ -34,7 +34,6 @@
#include <vm/pmap.h>
#include <machine/xen/xenpmap.h>
#include <machine/segments.h>
#include <sys/pcpu.h>

View File

@ -1,237 +0,0 @@
/*
*
* Copyright (c) 2004 Christian Limpach.
* Copyright (c) 2004,2005 Kip Macy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Christian Limpach.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* $FreeBSD$
*/
#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_
#if defined(XEN)
void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int);
void xen_pt_switch(vm_paddr_t);
void xen_set_ldt(vm_paddr_t, unsigned long);
void xen_pgdpt_pin(vm_paddr_t);
void xen_pgd_pin(vm_paddr_t);
void xen_pgd_unpin(vm_paddr_t);
void xen_pt_pin(vm_paddr_t);
void xen_pt_unpin(vm_paddr_t);
void xen_flush_queue(void);
void pmap_ref(pt_entry_t *pte, vm_paddr_t ma);
void pmap_suspend(void);
void pmap_resume(void);
void xen_check_queue(void);
#ifdef INVARIANTS
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__)
#else
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0)
#endif
#include <sys/param.h>
#include <sys/pcpu.h>
#ifdef PMAP_DEBUG
#define PMAP_REF pmap_ref
#define PMAP_DEC_REF_PAGE pmap_dec_ref_page
#define PMAP_MARK_PRIV pmap_mark_privileged
#define PMAP_MARK_UNPRIV pmap_mark_unprivileged
#else
#define PMAP_MARK_PRIV(a)
#define PMAP_MARK_UNPRIV(a)
#define PMAP_REF(a, b)
#define PMAP_DEC_REF_PAGE(a)
#endif
#define ALWAYS_SYNC 0
#ifdef PT_DEBUG
#define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__)
#else
#define PT_LOG()
#endif
#define INVALID_P2M_ENTRY (~0UL)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
#define SH_PD_SET_VA 1
#define SH_PD_SET_VA_MA 2
#define SH_PD_SET_VA_CLEAR 3
struct pmap;
void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type);
#ifdef notyet
static vm_paddr_t
vptetomachpte(vm_paddr_t *pte)
{
vm_offset_t offset, ppte;
vm_paddr_t pgoffset, retval, *pdir_shadow_ptr;
int pgindex;
ppte = (vm_offset_t)pte;
pgoffset = (ppte & PAGE_MASK);
offset = ppte - (vm_offset_t)PTmap;
pgindex = ppte >> PDRSHIFT;
pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow);
retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset;
return (retval);
}
#endif
#define PT_GET(_ptp) \
(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))
#ifdef WRITABLE_PAGETABLES
#define PT_SET_VA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
PT_LOG(); \
*(_ptp) = xpmap_ptom((_npte)); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
PT_LOG(); \
*(_ptp) = (_npte); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
PT_LOG(); \
*(_ptp) = 0; \
} while (/*CONSTCOND*/0)
#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \
PMAP_REF((_ptp), (_npte)); \
pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#else /* !WRITABLE_PAGETABLES */
#define PT_SET_VA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
xen_queue_pt_update(vtomach(_ptp), \
xpmap_ptom(_npte)); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
xen_queue_pt_update(vtomach(_ptp), _npte); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_CLEAR_VA(_ptp, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
xen_queue_pt_update(vtomach(_ptp), 0); \
if (sync || ALWAYS_SYNC) \
xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \
PMAP_REF((_ptp), xpmap_ptom(_npte)); \
pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \
PMAP_REF((_ptp), (_npte)); \
pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \
PMAP_REF((pt_entry_t *)(_ptp), 0); \
pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \
if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#endif
#define PT_SET_MA(_va, _ma) \
do { \
PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),\
(_ma), \
UVMF_INVLPG| UVMF_ALL) < 0); \
} while (/*CONSTCOND*/0)
#define PT_UPDATES_FLUSH() do { \
xen_flush_queue(); \
} while (/*CONSTCOND*/0)
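/*
 * Editor's sketch (hypothetical helper, not from the original file): the
 * 'sync' argument lets callers batch updates -- queue several entries
 * without flushing, then issue a single xen_flush_queue() for the lot.
 */
static __inline void
example_qenter(pt_entry_t *pte, vm_paddr_t pa, int n)
{
	int i;

	for (i = 0; i < n; i++)
		PT_SET_VA(&pte[i], (pa + i * PAGE_SIZE) | PG_V | PG_RW, FALSE);
	PT_UPDATES_FLUSH();		/* one hypercall flush for the batch */
}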
static __inline vm_paddr_t
xpmap_mtop(vm_paddr_t mpa)
{
vm_paddr_t tmp = (mpa & PG_FRAME);
return machtophys(tmp) | (mpa & ~PG_FRAME);
}
static __inline vm_paddr_t
xpmap_ptom(vm_paddr_t ppa)
{
vm_paddr_t tmp = (ppa & PG_FRAME);
return phystomach(tmp) | (ppa & ~PG_FRAME);
}
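/*
 * Editor's worked example (made-up frame numbers): only the frame is
 * translated, the page offset passes through.  If PFNTOMFN(0x1000) were
 * 0x5a3, xpmap_ptom(0x1000234) would yield (0x5a3 << PAGE_SHIFT) | 0x234,
 * and xpmap_mtop() inverts it via MFNTOPFN.
 */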
static __inline void
set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
#ifdef notyet
PANIC_IF(max_mapnr && pfn >= max_mapnr);
#endif
if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef notyet
PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY));
#endif
return;
}
xen_phys_machine[pfn] = mfn;
}
static __inline int
phys_to_machine_mapping_valid(unsigned long pfn)
{
return xen_phys_machine[pfn] != INVALID_P2M_ENTRY;
}
#endif /* !XEN */
#endif /* _XEN_XENPMAP_H_ */

View File

@ -1,89 +0,0 @@
/*
* Simple prototype Xen Store Daemon providing a simple tree-like database.
* Copyright (C) 2005 Rusty Russell IBM Corporation
*
* This file may be distributed separately from the Linux kernel, or
* incorporated into other software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _XENSTORED_H
#define _XENSTORED_H
enum xsd_sockmsg_type
{
XS_DEBUG,
XS_SHUTDOWN,
XS_DIRECTORY,
XS_READ,
XS_GET_PERMS,
XS_WATCH,
XS_WATCH_ACK,
XS_UNWATCH,
XS_TRANSACTION_START,
XS_TRANSACTION_END,
XS_OP_READ_ONLY = XS_TRANSACTION_END,
XS_INTRODUCE,
XS_RELEASE,
XS_GETDOMAINPATH,
XS_WRITE,
XS_MKDIR,
XS_RM,
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
};
#define XS_WRITE_NONE "NONE"
#define XS_WRITE_CREATE "CREATE"
#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
/* We hand errors as strings, for portability. */
struct xsd_errors
{
int errnum;
const char *errstring;
};
#define XSD_ERROR(x) { x, #x }
static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
XSD_ERROR(EINVAL),
XSD_ERROR(EACCES),
XSD_ERROR(EEXIST),
XSD_ERROR(EISDIR),
XSD_ERROR(ENOENT),
XSD_ERROR(ENOMEM),
XSD_ERROR(ENOSPC),
XSD_ERROR(EIO),
XSD_ERROR(ENOTEMPTY),
XSD_ERROR(ENOSYS),
XSD_ERROR(EROFS),
XSD_ERROR(EBUSY),
XSD_ERROR(ETIMEDOUT),
XSD_ERROR(EISCONN),
};
struct xsd_sockmsg
{
uint32_t type;
uint32_t len; /* Length of data following this. */
/* Generally followed by nul-terminated string(s). */
};
#endif /* _XENSTORED_H */

View File

@ -29,91 +29,8 @@
#ifndef XENVAR_H_
#define XENVAR_H_
#include <machine/xen/features.h>
#if defined(XEN)
#define XBOOTUP 0x1
#define XPMAP 0x2
extern int xendebug_flags;
#ifndef NOXENDEBUG
/* Print directly to the Xen console during debugging. */
#define XENPRINTF xc_printf
#else
#define XENPRINTF printf
#endif
extern xen_pfn_t *xen_phys_machine;
extern xen_pfn_t *xen_pfn_to_mfn_frame_list[16];
extern xen_pfn_t *xen_pfn_to_mfn_frame_list_list;
#if 0
#define TRACE_ENTER XENPRINTF("(file=%s, line=%d) entered %s\n", __FILE__, __LINE__, __FUNCTION__)
#define TRACE_EXIT XENPRINTF("(file=%s, line=%d) exiting %s\n", __FILE__, __LINE__, __FUNCTION__)
#define TRACE_DEBUG(argflags, _f, _a...) \
if (xendebug_flags & argflags) XENPRINTF("(file=%s, line=%d) " _f "\n", __FILE__, __LINE__, ## _a);
#else
#define TRACE_ENTER
#define TRACE_EXIT
#define TRACE_DEBUG(argflags, _f, _a...)
#endif
extern xen_pfn_t *xen_machine_phys;
/* Xen starts physical pages after the 4MB ISA hole -
* FreeBSD doesn't
*/
#undef ADD_ISA_HOLE /* XXX */
#ifdef ADD_ISA_HOLE
#define ISA_INDEX_OFFSET 1024
#define ISA_PDR_OFFSET 1
#else
#define ISA_INDEX_OFFSET 0
#define ISA_PDR_OFFSET 0
#endif
#define PFNTOMFN(i) (xen_phys_machine[(i)])
#define MFNTOPFN(i) ((vm_paddr_t)xen_machine_phys[(i)])
#define VTOP(x) ((((uintptr_t)(x))) - KERNBASE)
#define PTOV(x) (((uintptr_t)(x)) + KERNBASE)
#define VTOPFN(x) (VTOP(x) >> PAGE_SHIFT)
#define PFNTOV(x) PTOV((vm_paddr_t)(x) << PAGE_SHIFT)
#define VTOMFN(va) (vtomach(va) >> PAGE_SHIFT)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define phystomach(pa) (((vm_paddr_t)(PFNTOMFN((pa) >> PAGE_SHIFT))) << PAGE_SHIFT)
#define machtophys(ma) (((vm_paddr_t)(MFNTOPFN((ma) >> PAGE_SHIFT))) << PAGE_SHIFT)
void xpq_init(void);
#define BITS_PER_LONG 32
#define NR_CPUS XEN_LEGACY_MAX_VCPUS
#define BITS_TO_LONGS(bits) \
(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
int xen_create_contiguous_region(vm_page_t pages, int npages);
void xen_destroy_contiguous_region(void * addr, int npages);
#elif defined(XENHVM)
#include <xen/features.h>
#define vtomach(va) pmap_kextract((vm_offset_t) (va))
#define PFNTOMFN(pa) (pa)
#define MFNTOPFN(ma) (ma)
#define set_phys_to_machine(pfn, mfn) ((void)0)
#define phys_to_machine_mapping_valid(pfn) (TRUE)
#endif /* !XEN && !XENHVM */
#endif

View File

@ -69,10 +69,6 @@ __FBSDID("$FreeBSD$");
#include <machine/ucontext.h>
#include <machine/intr_machdep.h>
#ifdef XEN
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#endif
#ifdef DEV_ISA
#include <isa/isavar.h>
@ -157,13 +153,8 @@ void xsaveopt(char *addr, uint64_t mask);
#endif /* __GNUCLIKE_ASM && !lint */
#ifdef XEN
#define start_emulating() (HYPERVISOR_fpu_taskswitch(1))
#define stop_emulating() (HYPERVISOR_fpu_taskswitch(0))
#else
#define start_emulating() load_cr0(rcr0() | CR0_TS)
#define stop_emulating() clts()
#endif
#ifdef CPU_ENABLE_SSE
#define GET_FPU_CW(thread) \

View File

@ -93,9 +93,7 @@ static uint32_t pci_docfgregread(int bus, int slot, int func, int reg,
int bytes);
static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes);
static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes);
#ifndef XEN
static int pcireg_cfgopen(void);
#endif
static int pciereg_cfgread(int bus, unsigned slot, unsigned func,
unsigned reg, unsigned bytes);
static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func,
@ -116,7 +114,6 @@ pci_i386_map_intline(int line)
return (line);
}
#ifndef XEN
static u_int16_t
pcibios_get_version(void)
{
@ -137,7 +134,6 @@ pcibios_get_version(void)
}
return (args.ebx & 0xffff);
}
#endif
/*
* Initialise access to PCI configuration space
@ -145,9 +141,6 @@ pcibios_get_version(void)
int
pci_cfgregopen(void)
{
#ifdef XEN
return (0);
#else
static int opened = 0;
uint64_t pciebar;
u_int16_t vid, did;
@ -202,7 +195,6 @@ pci_cfgregopen(void)
}
return(1);
#endif
}
static uint32_t
@ -390,7 +382,6 @@ pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes)
mtx_unlock_spin(&pcicfg_mtx);
}
#ifndef XEN
/* check whether the configuration mechanism has been correctly identified */
static int
pci_cfgcheck(int maxdev)
@ -607,7 +598,6 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
return (1);
}
#endif /* !XEN */
#define PCIE_PADDR(base, reg, bus, slot, func) \
((base) + \

View File

@ -137,9 +137,6 @@ pci_pir_open(void)
int i;
uint8_t ck, *cv;
#ifdef XEN
return;
#else
/* Don't try if we've already found a table. */
if (pci_route_table != NULL)
return;
@ -150,7 +147,7 @@ pci_pir_open(void)
sigaddr = bios_sigsearch(0, "_PIR", 4, 16, 0);
if (sigaddr == 0)
return;
#endif
/* If we found something, check the checksum and length. */
/* XXX - Use pmap_mapdev()? */
pt = (struct PIR_table *)(uintptr_t)BIOS_PADDRTOVADDR(sigaddr);
@ -481,11 +478,7 @@ pci_pir_biosroute(int bus, int device, int func, int pin, int irq)
args.eax = PCIBIOS_ROUTE_INTERRUPT;
args.ebx = (bus << 8) | (device << 3) | func;
args.ecx = (irq << 8) | (0xa + pin);
#ifdef XEN
return (0);
#else
return (bios32(&args, PCIbios.ventry, GSEL(GCODE_SEL, SEL_KPL)));
#endif
}

View File

@ -1,570 +0,0 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz and Don Ahn.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)clock.c 7.2 (Berkeley) 5/12/91
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* #define DELAYDEBUG */
/*
* Routines to handle clock hardware.
*/
#include "opt_ddb.h"
#include "opt_clock.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <sys/cons.h>
#include <sys/power.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pvclock.h>
#if defined(SMP)
#include <machine/smp.h>
#endif
#include <machine/specialreg.h>
#include <machine/timerreg.h>
#include <x86/isa/icu.h>
#include <isa/isareg.h>
#include <isa/rtc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <xen/hypervisor.h>
#include <xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/interface/vcpu.h>
#include <machine/cpu.h>
#include <xen/xen_intr.h>
/*
* 32-bit time_t's can't reach leap years before 1904 or after 2036, so we
* can use a simple formula for leap years.
*/
#define LEAPYEAR(y) (!((y) % 4))
#define DAYSPERYEAR (28+30*4+31*7)
#ifndef TIMER_FREQ
#define TIMER_FREQ 1193182
#endif
#ifdef CYC2NS_SCALE_FACTOR
#undef CYC2NS_SCALE_FACTOR
#endif
#define CYC2NS_SCALE_FACTOR 10
/* Values for timerX_state: */
#define RELEASED 0
#define RELEASE_PENDING 1
#define ACQUIRED 2
#define ACQUIRE_PENDING 3
struct mtx clock_lock;
#define RTC_LOCK_INIT \
mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE)
#define RTC_LOCK mtx_lock_spin(&clock_lock)
#define RTC_UNLOCK mtx_unlock_spin(&clock_lock)
#define NS_PER_TICK (1000000000ULL/hz)
int adjkerntz; /* local offset from UTC in seconds */
int clkintr_pending;
int pscnt = 1;
int psdiv = 1;
int wall_cmos_clock;
u_int timer_freq = TIMER_FREQ;
static u_long cyc2ns_scale;
static uint64_t processed_system_time; /* stime (ns) at last processing. */
#define do_div(n,base) ({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
__asm("":"=a" (__low), "=d" (__high):"A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
__asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
__asm("":"=A" (n):"a" (__low),"d" (__high)); \
__mod; \
})
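/*
 * Editor's note: as with the Linux helper of the same name, this divides
 * the 64-bit 'n' in place by the 32-bit 'base' and evaluates to the
 * remainder; e.g. with n = 10000000000ULL, do_div(n, 3) leaves
 * n == 3333333333 and returns 1.
 */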
/* convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
* ns = cycles / (freq / ns_per_sec)
* ns = cycles * (ns_per_sec / freq)
* ns = cycles * (10^9 / (cpu_mhz * 10^6))
* ns = cycles * (10^3 / cpu_mhz)
*
* Then we use scaling math (suggested by george@mvista.com) to get:
* ns = cycles * (10^3 * SC / cpu_mhz) / SC
* ns = cycles * cyc2ns_scale / SC
*
* And since SC is a constant power of two, we can convert the div
* into a shift.
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
{
cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
return ((cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
}
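/*
 * Editor's worked example (hypothetical 2400 MHz CPU):
 * set_cyc2ns_scale(2400) gives cyc2ns_scale = (1000 << 10) / 2400 = 426,
 * so cycles_2_ns(2400000) = (2400000 * 426) >> 10 = 998437 ns -- about
 * the expected 1 ms, within ~0.2% from truncating the scale factor.
 */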
static uint32_t
getit(void)
{
return (pvclock_get_last_cycles());
}
/*
* XXX: timer needs more SMP work.
*/
void
i8254_init(void)
{
RTC_LOCK_INIT;
}
/*
* Wait "n" microseconds.
* Relies on timer 1 counting down from (timer_freq / hz)
* Note: timer had better have been programmed before this is first used!
*/
void
i8254_delay(int n)
{
int delta, ticks_left;
uint32_t tick, prev_tick;
#ifdef DELAYDEBUG
int getit_calls = 1;
int n1;
static int state = 0;
if (state == 0) {
state = 1;
for (n1 = 1; n1 <= 10000000; n1 *= 10)
DELAY(n1);
state = 2;
}
if (state == 1)
printf("DELAY(%d)...", n);
#endif
/*
* Read the counter first, so that the rest of the setup overhead is
* counted. Guess the initial overhead is 20 usec (on most systems it
* takes about 1.5 usec for each of the i/o's in getit(). The loop
* takes about 6 usec on a 486/33 and 13 usec on a 386/20. The
* multiplications and divisions to scale the count take a while).
*
* However, if ddb is active then use a fake counter since reading
* the i8254 counter involves acquiring a lock. ddb must not acquire
* locks for many reasons, but it calls here for at least atkbd
* input.
*/
prev_tick = getit();
n -= 0; /* XXX actually guess no initial overhead */
/*
* Calculate (n * (timer_freq / 1e6)) without using floating point
* and without any avoidable overflows.
*/
if (n <= 0)
ticks_left = 0;
else if (n < 256)
/*
* Use fixed point to avoid a slow division by 1000000.
* 39099 = 1193182 * 2^15 / 10^6 rounded to nearest.
* 2^15 is the first power of 2 that gives exact results
* for n between 0 and 256.
*/
ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15;
else
/*
* Don't bother using fixed point, although gcc-2.7.2
* generates particularly poor code for the long long
* division, since even the slow way will complete long
* before the delay is up (unless we're interrupted).
*/
ticks_left = ((u_int)n * (long long)timer_freq + 999999)
/ 1000000;
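/*
 * Editor's note: 1193182 * 2^15 / 10^6 is about 39098.2; using 39099
 * together with the (1 << 15) - 1 rounding term biases ticks_left
 * upward, so short delays never undershoot.
 */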
while (ticks_left > 0) {
tick = getit();
#ifdef DELAYDEBUG
++getit_calls;
#endif
delta = tick - prev_tick;
prev_tick = tick;
if (delta < 0) {
/*
* Guard against timer0_max_count being wrong.
* This shouldn't happen in normal operation,
* but it may happen if set_timer_freq() is
* traced.
*/
/* delta += timer0_max_count; ??? */
if (delta < 0)
delta = 0;
}
ticks_left -= delta;
}
#ifdef DELAYDEBUG
if (state == 1)
printf(" %d calls to getit() at %d usec each\n",
getit_calls, (n + 5) / getit_calls);
#endif
}
void
startrtclock()
{
uint64_t __cpu_khz;
uint32_t cpu_khz;
struct vcpu_time_info *info;
__cpu_khz = 1000000ULL << 32;
info = &HYPERVISOR_shared_info->vcpu_info[0].time;
(void)do_div(__cpu_khz, info->tsc_to_system_mul);
if ( info->tsc_shift < 0 )
cpu_khz = __cpu_khz << -info->tsc_shift;
else
cpu_khz = __cpu_khz >> info->tsc_shift;
printf("Xen reported: %u.%03u MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
/* (10^6 * 2^32) / cpu_hz = (10^3 * 2^32) / cpu_khz =
(2^32 * 1 / (clocks/us)) */
set_cyc2ns_scale(cpu_khz/1000);
tsc_freq = cpu_khz * 1000;
}
/*
* RTC support routines
*/
static __inline int
readrtc(int port)
{
return(bcd2bin(rtcin(port)));
}
#ifdef XEN_PRIVILEGED_GUEST
/*
* Initialize the time of day register, based on the time base which is, e.g.
* from a filesystem.
*/
static void
domu_inittodr(time_t base)
{
unsigned long sec;
int s, y;
struct timespec ts;
update_wallclock();
add_uptime_to_wallclock();
RTC_LOCK;
if (base) {
ts.tv_sec = base;
ts.tv_nsec = 0;
tc_setclock(&ts);
}
sec += tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
y = time_second - shadow_tv.tv_sec;
if (y <= -2 || y >= 2) {
/* badly off, adjust it */
tc_setclock(&shadow_tv);
}
RTC_UNLOCK;
}
/*
* Write system time back to RTC.
*/
static void
domu_resettodr(void)
{
unsigned long tm;
int s;
dom0_op_t op;
struct shadow_time_info *shadow;
struct pcpu *pc;
pc = pcpu_find(smp_processor_id());
shadow = &pc->pc_shadow_time;
if (xen_disable_rtc_set)
return;
s = splclock();
tm = time_second;
splx(s);
tm -= tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
if ((xen_start_info->flags & SIF_INITDOMAIN) &&
!independent_wallclock)
{
op.cmd = DOM0_SETTIME;
op.u.settime.secs = tm;
op.u.settime.nsecs = 0;
op.u.settime.system_time = shadow->system_timestamp;
HYPERVISOR_dom0_op(&op);
update_wallclock();
add_uptime_to_wallclock();
} else if (independent_wallclock) {
/* notyet */
;
}
}
/*
* Initialize the time of day register, based on the time base which is, e.g.
* from a filesystem.
*/
void
inittodr(time_t base)
{
unsigned long sec, days;
int year, month;
int y, m, s;
struct timespec ts;
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
domu_inittodr(base);
return;
}
if (base) {
s = splclock();
ts.tv_sec = base;
ts.tv_nsec = 0;
tc_setclock(&ts);
splx(s);
}
/* Look if we have a RTC present and the time is valid */
if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
goto wrong_time;
/* wait for time update to complete */
/* If RTCSA_TUP is zero, we have at least 244us before next update */
s = splhigh();
while (rtcin(RTC_STATUSA) & RTCSA_TUP) {
splx(s);
s = splhigh();
}
days = 0;
#ifdef USE_RTC_CENTURY
year = readrtc(RTC_YEAR) + readrtc(RTC_CENTURY) * 100;
#else
year = readrtc(RTC_YEAR) + 1900;
if (year < 1970)
year += 100;
#endif
if (year < 1970) {
splx(s);
goto wrong_time;
}
month = readrtc(RTC_MONTH);
for (m = 1; m < month; m++)
days += daysinmonth[m-1];
if ((month > 2) && LEAPYEAR(year))
days ++;
days += readrtc(RTC_DAY) - 1;
for (y = 1970; y < year; y++)
days += DAYSPERYEAR + LEAPYEAR(y);
sec = ((( days * 24 +
readrtc(RTC_HRS)) * 60 +
readrtc(RTC_MIN)) * 60 +
readrtc(RTC_SEC));
/* sec now contains the number of seconds, since Jan 1 1970,
in the local time zone */
sec += tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
y = time_second - sec;
if (y <= -2 || y >= 2) {
/* badly off, adjust it */
ts.tv_sec = sec;
ts.tv_nsec = 0;
tc_setclock(&ts);
}
splx(s);
return;
wrong_time:
printf("Invalid time in real time clock.\n");
printf("Check and reset the date immediately!\n");
}
/*
* Write system time back to RTC
*/
void
resettodr()
{
unsigned long tm;
int y, m, s;
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
domu_resettodr();
return;
}
if (xen_disable_rtc_set)
return;
s = splclock();
tm = time_second;
splx(s);
/* Disable RTC updates and interrupts. */
writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);
/* Calculate local time to put in RTC */
tm -= tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
writertc(RTC_SEC, bin2bcd(tm%60)); tm /= 60; /* Write back Seconds */
writertc(RTC_MIN, bin2bcd(tm%60)); tm /= 60; /* Write back Minutes */
writertc(RTC_HRS, bin2bcd(tm%24)); tm /= 24; /* Write back Hours */
/* We have now the days since 01-01-1970 in tm */
writertc(RTC_WDAY, (tm + 4) % 7 + 1); /* Write back Weekday */
for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y);
tm >= m;
y++, m = DAYSPERYEAR + LEAPYEAR(y))
tm -= m;
/* Now we have the years in y and the day-of-the-year in tm */
writertc(RTC_YEAR, bin2bcd(y%100)); /* Write back Year */
#ifdef USE_RTC_CENTURY
writertc(RTC_CENTURY, bin2bcd(y/100)); /* ... and Century */
#endif
for (m = 0; ; m++) {
int ml;
ml = daysinmonth[m];
if (m == 1 && LEAPYEAR(y))
ml++;
if (tm < ml)
break;
tm -= ml;
}
writertc(RTC_MONTH, bin2bcd(m + 1)); /* Write back Month */
writertc(RTC_DAY, bin2bcd(tm + 1)); /* Write back Month Day */
/* Reenable RTC updates and interrupts. */
writertc(RTC_STATUSB, RTCSB_24HR);
rtcin(RTC_INTR);
}
#endif
/*
* Start clocks running.
*/
void
cpu_initclocks(void)
{
cpu_initclocks_bsp();
}
/* Return system time offset by ticks */
uint64_t
get_system_time(int ticks)
{
return (processed_system_time + (ticks * NS_PER_TICK));
}
int
timer_spkr_acquire(void)
{
return (0);
}
int
timer_spkr_release(void)
{
return (0);
}
void
timer_spkr_setfreq(int freq)
{
}

View File

@ -1,494 +0,0 @@
/*-
* Copyright (c) 1989, 1990 William F. Jolitz.
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "opt_apic.h"
#include "opt_npx.h"
#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include "assym.s"
#define SEL_RPL_MASK 0x0002
#define __HYPERVISOR_iret 23
/* Offsets into shared_info_t. */
#define evtchn_upcall_pending /* 0 */
#define evtchn_upcall_mask 1
#define sizeof_vcpu_shift 6
#ifdef SMP
#define GET_VCPU_INFO(reg) movl PCPU(CPUID),reg ; \
shl $sizeof_vcpu_shift,reg ; \
addl HYPERVISOR_shared_info,reg
#else
#define GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
#endif
#define __DISABLE_INTERRUPTS(reg) movb $1,evtchn_upcall_mask(reg)
#define __ENABLE_INTERRUPTS(reg) movb $0,evtchn_upcall_mask(reg)
#define DISABLE_INTERRUPTS(reg) GET_VCPU_INFO(reg) ; \
__DISABLE_INTERRUPTS(reg)
#define ENABLE_INTERRUPTS(reg) GET_VCPU_INFO(reg) ; \
__ENABLE_INTERRUPTS(reg)
#define __TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
#define POPA \
popl %edi; \
popl %esi; \
popl %ebp; \
popl %ebx; \
popl %ebx; \
popl %edx; \
popl %ecx; \
popl %eax;
.text
/*****************************************************************************/
/* Trap handling */
/*****************************************************************************/
/*
* Trap and fault vector routines.
*
* Most traps are 'trap gates', SDT_SYS386TGT. A trap gate pushes state on
* the stack that mostly looks like an interrupt, but does not disable
* interrupts. A few of the traps we use are interrupt gates,
* SDT_SYS386IGT, which are nearly the same thing except interrupts are
* disabled on entry.
*
* The cpu will push a certain amount of state onto the kernel stack for
* the current process. The amount of state depends on the type of trap
* and whether the trap crossed rings or not. See i386/include/frame.h.
* At the very least the current EFLAGS (status register, which includes
* the interrupt disable state prior to the trap), the code segment register,
* and the return instruction pointer are pushed by the cpu. The cpu
* will also push an 'error' code for certain traps. We push a dummy
* error code for those traps where the cpu doesn't push one, in order to maintain
* a consistent frame. We also push a contrived 'trap number'.
*
* The cpu does not push the general registers, we must do that, and we
* must restore them prior to calling 'iret'. The cpu adjusts the %cs and
* %ss segment registers, but does not mess with %ds, %es, or %fs. Thus we
* must load them with appropriate values for supervisor mode operation.
*/
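/*
 * Editor's sketch of the frame this builds, matching i386's struct
 * trapframe from low to high addresses once the stubs below finish:
 * fs, es, ds, then edi, esi, ebp, esp, ebx, edx, ecx, eax (pushal),
 * then trapno, err, eip, cs, eflags, with esp/ss present only if the
 * trap crossed rings.
 */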
MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)
#define TRAP(a) pushl $(a) ; jmp alltraps
IDTVEC(div)
pushl $0; TRAP(T_DIVIDE)
IDTVEC(dbg)
pushl $0; TRAP(T_TRCTRAP)
IDTVEC(nmi)
pushl $0; TRAP(T_NMI)
IDTVEC(bpt)
pushl $0; TRAP(T_BPTFLT)
IDTVEC(ofl)
pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
pushl $0; TRAP(T_BOUND)
IDTVEC(ill)
pushl $0; TRAP(T_PRIVINFLT)
IDTVEC(dna)
pushl $0; TRAP(T_DNA)
IDTVEC(fpusegm)
pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
TRAP(T_TSSFLT)
IDTVEC(missing)
TRAP(T_SEGNPFLT)
IDTVEC(stk)
TRAP(T_STKFLT)
IDTVEC(prot)
TRAP(T_PROTFLT)
IDTVEC(page)
TRAP(T_PAGEFLT)
IDTVEC(mchk)
pushl $0; TRAP(T_MCHK)
IDTVEC(rsvd)
pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
pushl $0; TRAP(T_ARITHTRAP)
IDTVEC(align)
TRAP(T_ALIGNFLT)
IDTVEC(xmm)
pushl $0; TRAP(T_XMMFLT)
IDTVEC(hypervisor_callback)
pushl $0;
pushl $0;
pushal
pushl %ds
pushl %es
pushl %fs
upcall_with_regs_pushed:
SET_KERNEL_SREGS
FAKE_MCOUNT(TF_EIP(%esp))
call_evtchn_upcall:
movl TF_EIP(%esp),%eax
cmpl $scrit,%eax
jb 10f
cmpl $ecrit,%eax
jb critical_region_fixup
10: pushl %esp
call xen_intr_handle_upcall
addl $4,%esp
/*
* Return via doreti to handle ASTs.
*/
MEXITCOUNT
jmp doreti
hypervisor_callback_pending:
DISABLE_INTERRUPTS(%esi) /* cli */
jmp 10b
/*
* alltraps entry point. Interrupts are enabled if this was a trap
* gate (TGT), else disabled if this was an interrupt gate (IGT).
* Note that int0x80_syscall is a trap gate. Only page faults
* use an interrupt gate.
*/
SUPERALIGN_TEXT
.globl alltraps
.type alltraps,@function
alltraps:
pushal
pushl %ds
pushl %es
pushl %fs
alltraps_with_regs_pushed:
SET_KERNEL_SREGS
FAKE_MCOUNT(TF_EIP(%esp))
calltrap:
push %esp
call trap
add $4, %esp
/*
* Return via doreti to handle ASTs.
*/
MEXITCOUNT
jmp doreti
/*
* SYSCALL CALL GATE (old entry point for a.out binaries)
*
* The intersegment call has been set up to specify one dummy parameter.
*
* This leaves a place to put eflags so that the call frame can be
* converted to a trap frame. Note that the eflags is (semi-)bogusly
* pushed into (what will be) tf_err and then copied later into the
* final spot. It has to be done this way because esp can't be just
* temporarily altered for the pushfl - an interrupt might come in
* and clobber the saved cs/eip.
*/
SUPERALIGN_TEXT
IDTVEC(lcall_syscall)
pushfl /* save eflags */
popl 8(%esp) /* shuffle into tf_eflags */
pushl $7 /* sizeof "lcall 7,0" */
subl $4,%esp /* skip over tf_trapno */
pushal
pushl %ds
pushl %es
pushl %fs
SET_KERNEL_SREGS
FAKE_MCOUNT(TF_EIP(%esp))
pushl %esp
call syscall
add $4, %esp
MEXITCOUNT
jmp doreti
/*
* Call gate entry for FreeBSD ELF and Linux/NetBSD syscall (int 0x80)
*
* Even though the name says 'int0x80', this is actually a TGT (trap gate)
* rather than an IGT (interrupt gate). Thus interrupts are enabled on
* entry just as they are for a normal syscall.
*/
SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
pushl $2 /* sizeof "int 0x80" */
pushl $0xBEEF /* for debug */
pushal
pushl %ds
pushl %es
pushl %fs
SET_KERNEL_SREGS
FAKE_MCOUNT(TF_EIP(%esp))
pushl %esp
call syscall
add $4, %esp
MEXITCOUNT
jmp doreti
ENTRY(fork_trampoline)
pushl %esp /* trapframe pointer */
pushl %ebx /* arg1 */
pushl %esi /* function */
call fork_exit
addl $12,%esp
/* cut from syscall */
/*
* Return via doreti to handle ASTs.
*/
MEXITCOUNT
jmp doreti
/*
* To efficiently implement classification of trap and interrupt handlers
* for profiling, there must be only trap handlers between the labels btrap
* and bintr, and only interrupt handlers between the labels bintr and
* eintr. This is implemented (partly) by including files that contain
* some of the handlers. Before including the files, set up a normal asm
* environment so that the included files don't need to know that they are
* included.
*/
.data
.p2align 4
.text
SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)
#ifdef DEV_APIC
.data
.p2align 4
.text
SUPERALIGN_TEXT
#include <i386/i386/apic_vector.s>
#endif
.data
.p2align 4
.text
SUPERALIGN_TEXT
#include <i386/i386/vm86bios.s>
.text
MCOUNT_LABEL(eintr)
/*
* void doreti(struct trapframe)
*
* Handle return from interrupts, traps and syscalls.
*/
.text
SUPERALIGN_TEXT
.type doreti,@function
doreti:
FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */
doreti_next:
#ifdef notyet
/*
* Check if ASTs can be handled now. PSL_VM must be checked first
* since segment registers only have an RPL in non-VM86 mode.
*/
testl $PSL_VM,TF_EFLAGS(%esp) /* are we in vm86 mode? */
jz doreti_notvm86
movl PCPU(CURPCB),%ecx
testl $PCB_VM86CALL,PCB_FLAGS(%ecx) /* are we in a vm86 call? */
jz doreti_ast /* can handle ASTS now if not */
jmp doreti_exit
doreti_notvm86:
#endif
testb $SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */
jz doreti_exit /* can't handle ASTs now if not */
doreti_ast:
/*
* Check for ASTs atomically with returning. Disabling CPU
* interrupts provides sufficient locking even in the SMP case,
* since we will be informed of any new ASTs by an IPI.
*/
DISABLE_INTERRUPTS(%esi) /* cli */
movl PCPU(CURTHREAD),%eax
testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
je doreti_exit
ENABLE_INTERRUPTS(%esi) /* sti */
pushl %esp /* pass a pointer to the trapframe */
call ast
add $4,%esp
jmp doreti_ast
/*
* doreti_exit: pop registers, iret.
*
* The segment register pop is a special case, since it may
* fault if (for example) a sigreturn specifies bad segment
* registers. The fault is handled in trap.c.
*/
doreti_exit:
ENABLE_INTERRUPTS(%esi) # reenable event callbacks (sti)
.globl scrit
scrit:
__TEST_PENDING(%esi)
jnz hypervisor_callback_pending /* More to go */
MEXITCOUNT
.globl doreti_popl_fs
doreti_popl_fs:
popl %fs
.globl doreti_popl_es
doreti_popl_es:
popl %es
.globl doreti_popl_ds
doreti_popl_ds:
popl %ds
/*
* This is important: as nothing is atomic over here (we can get
* interrupted any time), we use the critical_region_fixup() in
* order to figure out where our stack is. Therefore, do NOT use
* 'popal' here without fixing up the table!
*/
POPA
addl $8,%esp
.globl doreti_iret
doreti_iret:
jmp hypercall_page + (__HYPERVISOR_iret * 32)
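/*
 * Editor's note on the jmp above: the hypervisor fills hypercall_page
 * with one 32-byte stub per hypercall, so this enters stub 23
 * (__HYPERVISOR_iret, defined earlier), i.e. offset 0x2e0, to perform
 * the PV equivalent of iret.
 */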
.globl ecrit
ecrit:
/*
* doreti_iret_fault and friends. Alternative return code for
* the case where we get a fault in the doreti_exit code
* above. trap() (i386/i386/trap.c) catches this specific
* case, sends the process a signal and continues in the
* corresponding place in the code below.
*/
ALIGN_TEXT
.globl doreti_iret_fault
doreti_iret_fault:
subl $8,%esp
pushal
pushl %ds
.globl doreti_popl_ds_fault
doreti_popl_ds_fault:
pushl %es
.globl doreti_popl_es_fault
doreti_popl_es_fault:
pushl %fs
.globl doreti_popl_fs_fault
doreti_popl_fs_fault:
movl $0,TF_ERR(%esp) /* XXX should be the error code */
movl $T_PROTFLT,TF_TRAPNO(%esp)
jmp alltraps_with_regs_pushed
/*
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
*/
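/*
 * Editor's worked example: if the upcall lands just before 'popl %ds'
 * (%fs and %es already off the frame), the table below yields 0x08 --
 * eight bytes of the interrupted frame were already popped -- and the
 * fixup re-merges the two frames before resuming at
 * hypervisor_callback_pending with a single coherent frame.
 */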
.globl critical_region_fixup
critical_region_fixup:
addl $critical_fixup_table-scrit,%eax
movzbl (%eax),%eax # %eax contains num bytes popped
movl %esp,%esi
add %eax,%esi # %esi points at end of src region
movl %esp,%edi
add $0x40,%edi # %edi points at end of dst region
movl %eax,%ecx
shr $2,%ecx # convert bytes to words
je 16f # skip loop if nothing to copy
15: subl $4,%esi # pre-decrementing copy loop
subl $4,%edi
movl (%esi),%eax
movl %eax,(%edi)
loop 15b
16: movl %edi,%esp # final %edi is top of merged stack
jmp hypervisor_callback_pending
critical_fixup_table:
.byte 0x0,0x0,0x0 #testb $0x1,(%esi)
.byte 0x0,0x0,0x0,0x0,0x0,0x0 #jne ea
.byte 0x0,0x0 #pop %fs
.byte 0x04 #pop %es
.byte 0x08 #pop %ds
.byte 0x0c #pop %edi
.byte 0x10 #pop %esi
.byte 0x14 #pop %ebp
.byte 0x18 #pop %ebx
.byte 0x1c #pop %ebx
.byte 0x20 #pop %edx
.byte 0x24 #pop %ecx
.byte 0x28 #pop %eax
.byte 0x2c,0x2c,0x2c #add $0x8,%esp
#if 0
.byte 0x34 #iret
#endif
.byte 0x34,0x34,0x34,0x34,0x34 #HYPERVISOR_iret
/* # Hypervisor uses this for application faults while it executes.*/
ENTRY(failsafe_callback)
pushal
call xen_failsafe_handler
/*# call install_safe_pf_handler */
movl 28(%esp),%ebx
1: movl %ebx,%ds
movl 32(%esp),%ebx
2: movl %ebx,%es
movl 36(%esp),%ebx
3: movl %ebx,%fs
movl 40(%esp),%ebx
4: movl %ebx,%gs
/*# call install_normal_pf_handler */
popal
addl $12,%esp
iret

View File

@ -1,360 +0,0 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $FreeBSD$
*
* originally from: locore.s, by William F. Jolitz
*
* Substantially rewritten by David Greenman, Rod Grimes,
* Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
* and many others.
*/
#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"
#include <sys/syscall.h>
#include <sys/reboot.h>
#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
#define __ASSEMBLY__
#include <xen/interface/elfnote.h>
/* The defines below have been lifted out of <machine/xen-public/arch-x86_32.h> */
#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
#define KERNEL_CS FLAT_RING1_CS
#define KERNEL_DS FLAT_RING1_DS
#include "assym.s"
.section __xen_guest
.ascii "LOADER=generic,GUEST_OS=freebsd,GUEST_VER=7.0,XEN_VER=xen-3.0,BSD_SYMTAB,VIRT_BASE=0xc0000000"
.byte 0
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "FreeBSD")
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "HEAD")
ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, KERNBASE)
ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, KERNBASE)
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, btext)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, XEN_HYPERVISOR_VIRT_START)
#if 0
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
#endif
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|supervisor_mode_kernel|writable_descriptor_tables")
#ifdef PAE
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, PG_V, PG_V)
#else
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, PG_V, PG_V)
#endif
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
/*
* XXX
*
* Note: This version has been greatly munged to avoid various assembler errors
* that may be fixed in newer versions of gas. Perhaps newer versions
* will have more pleasant appearance.
*/
/*
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
*/
.globl PTmap,PTD,PTDpde
.set PTmap,(PTDPTDI << PDRSHIFT)
.set PTD,PTmap + (PTDPTDI * PAGE_SIZE)
.set PTDpde,PTD + (PTDPTDI * PDESIZE)
/*
* Compiled KERNBASE location and the kernel load address
*/
.globl kernbase
.set kernbase,KERNBASE
.globl kernload
.set kernload,KERNLOAD
/*
* Globals
*/
.data
ALIGN_DATA /* just to be sure */
.space 0x2000 /* space for tmpstk - temporary stack */
tmpstk:
.globl bootinfo
bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
.globl KERNend
KERNend: .long 0 /* phys addr end of kernel (just after bss) */
.globl physfree
physfree: .long 0 /* phys addr of next free page */
.globl IdlePTD
IdlePTD: .long 0 /* phys addr of kernel PTD */
#ifdef PAE
.globl IdlePDPT
IdlePDPT: .long 0 /* phys addr of kernel PDPT */
#endif
#ifdef SMP
.globl KPTphys
#endif
KPTphys: .long 0 /* phys addr of kernel page tables */
.globl gdtset
gdtset: .long 0 /* GDT is valid */
.globl proc0kstack
proc0kstack: .long 0 /* address of proc 0 kstack space */
p0kpa: .long 0 /* phys addr of proc0's STACK */
vm86phystk: .long 0 /* PA of vm86/bios stack */
.globl vm86paddr, vm86pa
vm86paddr: .long 0 /* address of vm86 region */
vm86pa: .long 0 /* phys addr of vm86 region */
#ifdef PC98
.globl pc98_system_parameter
pc98_system_parameter:
.space 0x240
#endif
.globl avail_space
avail_space: .long 0
/**********************************************************************
*
* Some handy macros
*
*/
/*
* We're already in protected mode, so no remapping is needed.
*/
#define R(foo) (foo)
#define ALLOCPAGES(foo) \
movl R(physfree), %esi ; \
movl $((foo)*PAGE_SIZE), %eax ; \
addl %esi, %eax ; \
movl %eax, R(physfree) ; \
movl %esi, %edi ; \
movl $((foo)*PAGE_SIZE),%ecx ; \
xorl %eax,%eax ; \
cld ; \
rep ; \
stosb
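/*
 * Usage sketch (hedged, not taken verbatim from any caller): ALLOCPAGES(2)
 * would advance physfree by two pages, zero the newly reserved region via
 * rep stosb, and leave its physical base in %esi for the caller; %eax,
 * %ecx and %edi are clobbered in the process.
 */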
/*
* fillkpt
* eax = page frame address
* ebx = index into page table
* ecx = how many pages to map
* base = base address of page dir/table
* prot = protection bits
*/
#define fillkpt(base, prot) \
shll $PTESHIFT,%ebx ; \
addl base,%ebx ; \
orl $PG_V,%eax ; \
orl prot,%eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $PTESIZE,%ebx ; /* next pte */ \
loop 1b
/*
* fillkptphys(prot)
* eax = physical address
* ecx = how many pages to map
* prot = protection bits
*/
#define fillkptphys(prot) \
movl %eax, %ebx ; \
shrl $PAGE_SHIFT, %ebx ; \
fillkpt(R(KPTphys), prot)
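/*
 * Usage sketch (an assumed example, mirroring how locore typically calls
 * this): to map n pages starting at physical address pa read/write, load
 * %eax = pa and %ecx = n, then invoke fillkptphys($PG_RW); note that
 * fillkpt clobbers %eax, %ebx and %ecx as it walks the table.
 */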
/* Temporary stack */
.space 8192
tmpstack:
.long tmpstack, KERNEL_DS
.text
.p2align 12, 0x90
#define HYPERCALL_PAGE_OFFSET 0x1000
.org HYPERCALL_PAGE_OFFSET
ENTRY(hypercall_page)
.cfi_startproc
.skip 0x1000
.cfi_endproc
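/*
 * The hypervisor rewrites this page at boot so that hypercall nr gets a
 * 32-byte stub at hypercall_page + nr * 32; a HYPERVISOR_sched_op()
 * wrapper, for example, ends up as a
 * "call hypercall_page + __HYPERVISOR_sched_op * 32".
 */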
/**********************************************************************
*
* This is where the bootblocks start us, set the ball rolling...
*
*/
NON_GPROF_ENTRY(btext)
	/* The space beyond the end of our stack is free - record where it starts */
movl %esp,%ebx
movl %ebx,R(avail_space)
lss tmpstack,%esp
pushl %esi
call initvalues
popl %esi
/* Store the CPUID information */
xorl %eax,%eax
cpuid # cpuid 0
movl %eax,R(cpu_high) # highest capability
movl %ebx,R(cpu_vendor) # store vendor string
movl %edx,R(cpu_vendor+4)
movl %ecx,R(cpu_vendor+8)
movb $0,R(cpu_vendor+12)
movl $1,%eax
cpuid # cpuid 1
movl %eax,R(cpu_id) # store cpu_id
movl %ebx,R(cpu_procinfo) # store cpu_procinfo
movl %edx,R(cpu_feature) # store cpu_feature
movl %ecx,R(cpu_feature2) # store cpu_feature2
rorl $8,%eax # extract family type
andl $15,%eax
cmpl $5,%eax
movl $CPU_686,R(cpu)
movl proc0kstack,%eax
leal (KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp
xorl %ebp,%ebp /* mark end of frames */
#ifdef PAE
movl IdlePDPT,%esi
#else
movl IdlePTD,%esi
#endif
movl %esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)
pushl physfree
call init386
addl $4, %esp
call mi_startup
/* NOTREACHED */
int $3
/*
* Signal trampoline, copied to top of user stack
*/
NON_GPROF_ENTRY(sigcode)
calll *SIGF_HANDLER(%esp)
leal SIGF_UC(%esp),%eax /* get ucontext */
pushl %eax
testl $PSL_VM,UC_EFLAGS(%eax)
jne 1f
mov UC_GS(%eax), %gs /* restore %gs */
1:
movl $SYS_sigreturn,%eax
pushl %eax /* junk to fake return addr. */
int $0x80 /* enter kernel with args */
/* on stack */
1:
jmp 1b
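/*
 * The value pushed above is junk on purpose: sigreturn(2) resumes the
 * interrupted context directly and never returns here, so the "1: jmp 1b"
 * loop only spins if the syscall somehow falls through (a defensive
 * guard, as best as can be inferred).
 */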
#ifdef COMPAT_FREEBSD4
ALIGN_TEXT
freebsd4_sigcode:
calll *SIGF_HANDLER(%esp)
leal SIGF_UC4(%esp),%eax /* get ucontext */
pushl %eax
testl $PSL_VM,UC4_EFLAGS(%eax)
jne 1f
mov UC4_GS(%eax),%gs /* restore %gs */
1:
movl $344,%eax /* 4.x SYS_sigreturn */
pushl %eax /* junk to fake return addr. */
int $0x80 /* enter kernel with args */
/* on stack */
1:
jmp 1b
#endif
#ifdef COMPAT_43
ALIGN_TEXT
osigcode:
call *SIGF_HANDLER(%esp) /* call signal handler */
lea SIGF_SC(%esp),%eax /* get sigcontext */
pushl %eax
testl $PSL_VM,SC_PS(%eax)
jne 9f
movl SC_GS(%eax),%gs /* restore %gs */
9:
movl $103,%eax /* 3.x SYS_sigreturn */
pushl %eax /* junk to fake return addr. */
int $0x80 /* enter kernel with args */
0: jmp 0b
#endif /* COMPAT_43 */
ALIGN_TEXT
esigcode:
.data
.globl szsigcode
szsigcode:
.long esigcode-sigcode
#ifdef COMPAT_FREEBSD4
.globl szfreebsd4_sigcode
szfreebsd4_sigcode:
.long esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
.globl szosigcode
szosigcode:
.long esigcode-osigcode
#endif
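/*
 * The sz*sigcode values record each trampoline's length so that the exec
 * path can copy the matching sigcode onto the top of a new process's user
 * stack (a descriptive gloss, assuming the usual FreeBSD layout).
 */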

File diff suppressed because it is too large

View File

@@ -1,109 +0,0 @@
/*-
* Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
* Copyright (c) 1996, by Steve Passe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. The name of the developer may NOT be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <xen/hypervisor.h>
#include <xen/xen-os.h>
#include <machine/smp.h>
#include <xen/interface/vcpu.h>
static int mptable_probe(void);
static int mptable_probe_cpus(void);
static void mptable_register(void *dummy);
static int mptable_setup_local(void);
static int mptable_setup_io(void);
static struct apic_enumerator mptable_enumerator = {
"MPTable",
mptable_probe,
mptable_probe_cpus,
mptable_setup_local,
mptable_setup_io
};
static int
mptable_probe(void)
{
return (-100);
}
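/*
 * A probe value of -100 is presumably deliberate: the APIC code picks the
 * enumerator whose probe returns the largest (least negative) value, so
 * this Xen shim only wins when no firmware enumerator (e.g. the ACPI MADT
 * one) claims the system.
 */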
static int
mptable_probe_cpus(void)
{
int i, rc;
for (i = 0; i < MAXCPU; i++) {
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
if (rc >= 0)
cpu_add(i, (i == 0));
}
return (0);
}
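/*
 * VCPUOP_is_up returns a non-negative value (up or down) for any vCPU id
 * the domain actually has, and an error once the id is out of range, so
 * the loop above registers every vCPU known to the hypervisor, flagging
 * vCPU 0 as the BSP.
 */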
/*
* Initialize the local APIC on the BSP.
*/
static int
mptable_setup_local(void)
{
PCPU_SET(apic_id, 0);
PCPU_SET(vcpu_id, 0);
return (0);
}
static int
mptable_setup_io(void)
{
return (0);
}
static void
mptable_register(void *dummy __unused)
{
apic_register_enumerator(&mptable_enumerator);
}
SYSINIT(mptable_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST, mptable_register,
NULL);
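/*
 * Registering at SI_SUB_TUNABLES - 1 / SI_ORDER_FIRST is presumably what
 * guarantees this enumerator is on the list before the APIC code walks
 * the registered enumerators slightly later in the same SYSINIT
 * subsystem.
 */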

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1455,12 +1455,7 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
/* Schedule the ithread if needed. */
if (thread) {
error = intr_event_schedule_thread(ie);
#ifndef XEN
KASSERT(error == 0, ("bad stray interrupt"));
#else
if (error != 0)
log(LOG_WARNING, "bad stray interrupt");
#endif
}
critical_exit();
td->td_intr_nesting_level--;

View File

@@ -66,12 +66,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif
#define KTDSTATE(td) \
(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
@@ -475,9 +469,6 @@ mi_switch(int flags, struct thread *newtd)
"lockname:\"%s\"", td->td_lockname);
#endif
SDT_PROBE0(sched, , , preempt);
#ifdef XEN
PT_UPDATES_FLUSH();
#endif
sched_switch(td, newtd, flags);
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
"prio:%d", td->td_priority);

View File

@@ -99,11 +99,7 @@ pid_t pid_max = PID_MAX;
long maxswzone; /* max swmeta KVA storage */
long maxbcache; /* max buffer cache KVA storage */
long maxpipekva; /* Limit on pipe KVA */
#ifdef XEN
int vm_guest = VM_GUEST_XEN;
#else
int vm_guest = VM_GUEST_NO; /* Running as virtual machine guest? */
#endif
u_long maxtsiz; /* max text size */
u_long dfldsiz; /* initial data size limit */
u_long maxdsiz; /* max data size */

View File

@@ -80,12 +80,6 @@ __FBSDID("$FreeBSD$");
#include <net/vnet.h>
#endif
#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif
@@ -136,9 +130,6 @@ userret(struct thread *td, struct trapframe *frame)
* Let the scheduler adjust our priority etc.
*/
sched_userret(td);
#ifdef XEN
PT_UPDATES_FLUSH();
#endif
/*
* Check for misbehavior.

View File

@@ -431,14 +431,6 @@ vm_page_startup(vm_offset_t vaddr)
phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
}
#ifdef XEN
/*
* There is no obvious reason why i386 PV Xen needs vm_page structs
* created for these pseudo-physical addresses. XXX
*/
vm_phys_add_seg(0, phys_avail[0]);
#endif
low_water = phys_avail[0];
high_water = phys_avail[1];

View File

@@ -46,11 +46,7 @@
*/
#define SEL_RPL_MASK 3 /* requester priv level */
#define ISPL(s) ((s)&3) /* priority level of a selector */
#ifdef XEN
#define SEL_KPL 1 /* kernel priority level */
#else
#define SEL_KPL 0 /* kernel priority level */
#endif
#define SEL_UPL 3 /* user priority level */
#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
#define SEL_LDT 4 /* local descriptor table */
@@ -244,11 +240,7 @@ union descriptor {
#define GBIOSUTIL_SEL 16 /* BIOS interface (Utility) */
#define GBIOSARGS_SEL 17 /* BIOS interface (Arguments) */
#define GNDIS_SEL 18 /* For the NDIS layer */
#ifdef XEN
#define NGDT 9
#else
#define NGDT 19
#endif
/*
* Entries in the Local Descriptor Table (LDT)

View File

@@ -147,11 +147,6 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags);
#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif
/*
* Allocate a device specific dma_tag.
*/

View File

@@ -100,15 +100,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef XEN
/* XEN includes */
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>
#endif
/*
* Machine dependent boot() routine
*
@@ -193,33 +184,6 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
return (0);
}
#if defined(__i386__) && defined(XEN)
static void
idle_block(void)
{
HYPERVISOR_sched_op(SCHEDOP_block, 0);
}
void
cpu_halt(void)
{
HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
int scheduler_running;
static void
cpu_idle_hlt(sbintime_t sbt)
{
scheduler_running = 1;
enable_intr();
idle_block();
}
#else
/*
* Shutdown the CPU as much as possible
*/
@@ -230,8 +194,6 @@ cpu_halt(void)
halt();
}
#endif
void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
static int cpu_ident_amdc1e = 0; /* AMD C1E supported. */
static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
@@ -263,7 +225,6 @@ cpu_idle_acpi(sbintime_t sbt)
}
#endif /* !PC98 */
#if !defined(__i386__) || !defined(XEN)
static void
cpu_idle_hlt(sbintime_t sbt)
{
@@ -295,7 +256,6 @@ cpu_idle_hlt(sbintime_t sbt)
__asm __volatile("sti; hlt");
*state = STATE_RUNNING;
}
#endif
static void
cpu_idle_mwait(sbintime_t sbt)
@@ -370,7 +330,7 @@ cpu_probe_amdc1e(void)
}
}
#if defined(__i386__) && (defined(PC98) || defined(XEN))
#if defined(__i386__) && defined(PC98)
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
@@ -379,17 +339,15 @@ void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
void
cpu_idle(int busy)
{
#if !defined(__i386__) || !defined(XEN)
uint64_t msr;
#endif
sbintime_t sbt = -1;
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
busy, curcpu);
#if defined(MP_WATCHDOG) && (!defined(__i386__) || !defined(XEN))
#ifdef MP_WATCHDOG
ap_watchdog(PCPU_GET(cpuid));
#endif
#if !defined(__i386__) || !defined(XEN)
/* If we are busy - try to use fast methods. */
if (busy) {
if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
@@ -397,7 +355,6 @@ cpu_idle(int busy)
goto out;
}
}
#endif
/* If we have time - switch timers into idle mode. */
if (!busy) {
@@ -405,14 +362,12 @@ cpu_idle(int busy)
sbt = cpu_idleclock();
}
#if !defined(__i386__) || !defined(XEN)
/* Apply AMD APIC timer C1E workaround. */
if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
msr = rdmsr(MSR_AMDK8_IPM);
if (msr & AMDK8_CMPHALT)
wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
}
#endif
/* Call main idle method. */
cpu_idle_fn(sbt);
@@ -422,9 +377,7 @@ cpu_idle(int busy)
cpu_activeclock();
critical_exit();
}
#if !defined(__i386__) || !defined(XEN)
out:
#endif
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
busy, curcpu);
}

View File

@@ -1190,7 +1190,6 @@ hook_tsc_freq(void *arg __unused)
SYSINIT(hook_tsc_freq, SI_SUB_CONFIGURE, SI_ORDER_ANY, hook_tsc_freq, NULL);
#ifndef XEN
static const char *const vm_bnames[] = {
"QEMU", /* QEMU */
"Plex86", /* Plex86 */
@@ -1281,7 +1280,6 @@ identify_hypervisor(void)
freeenv(p);
}
}
#endif
/*
* Final stage of CPU identification.
@@ -1314,9 +1312,7 @@ identify_cpu(void)
cpu_feature2 = regs[2];
#endif
#ifndef XEN
identify_hypervisor();
#endif
cpu_vendor_id = find_cpu_vendor_id();
/*

View File

@@ -532,13 +532,6 @@ intr_shuffle_irqs(void *arg __unused)
struct intsrc *isrc;
int i;
#ifdef XEN
/*
* Doesn't work yet
*/
return;
#endif
/* Don't bother on UP. */
if (mp_ncpus == 1)
return;

View File

@@ -1579,17 +1579,13 @@ apic_setup_io(void *dummy __unused)
* Local APIC must be registered before other PICs and pseudo PICs
* for proper suspend/resume order.
*/
#ifndef XEN
intr_register_pic(&lapic_pic);
#endif
retval = best_enum->apic_setup_io();
if (retval != 0)
printf("%s: Failed to setup I/O APICs: returned %d\n",
best_enum->apic_name, retval);
#ifdef XEN
return;
#endif
/*
* Finish setting up the local APIC on the BSP once we know
* how to properly program the LINT pins. In particular, this

View File

@@ -1,7 +1,7 @@
/******************************************************************************
* xen_intr.c
*
* Xen event and interrupt services for x86 PV and HVM guests.
* Xen event and interrupt services for x86 HVM guests.
*
* Copyright (c) 2002-2005, K A Fraser
* Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
@@ -864,10 +864,8 @@ xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
u_int to_cpu, vcpu_id;
int error, masked;
#ifdef XENHVM
if (xen_vector_callback_enabled == 0)
return (EOPNOTSUPP);
#endif
to_cpu = apic_cpuid(apic_id);
vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;

View File

@@ -66,14 +66,11 @@ static int
nexus_xen_attach(device_t dev)
{
int error;
#ifndef XEN
device_t acpi_dev = NULL;
#endif
nexus_init_resources();
bus_generic_probe(dev);
#ifndef XEN
if (xen_initial_domain()) {
/* Disable some ACPI devices that are not usable by Dom0 */
acpi_cpu_disabled = true;
@@ -84,13 +81,10 @@ nexus_xen_attach(device_t dev)
if (acpi_dev == NULL)
panic("Unable to add ACPI bus to Xen Dom0");
}
#endif
error = bus_generic_attach(dev);
#ifndef XEN
if (xen_initial_domain() && (error == 0))
acpi_install_wakeup_handler(device_get_softc(acpi_dev));
#endif
return (error);
}