
vmd(4): Major driver refactoring

 - Re-implement the pcib interface to use the standard PCI bus driver on
   top of vmd(4) instead of a custom one.
 - Re-implement memory/bus resource allocation to properly handle even
   complicated configurations.
 - Re-implement interrupt handling to evenly distribute children's MSI/
   MSI-X interrupts between the available vmd(4) MSI-X vectors (sketched
   below) and set them up to be handled by standard OS mechanisms with
   minimal overhead, sharing vectors only when unavoidable.
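
To make the distribution scheme concrete: each child allocation simply
picks the VMD MSI-X vector that currently has the fewest users, which is
what the new vmd_alloc_msi() and vmd_alloc_msix() below do.  A minimal
standalone sketch of that selection policy, using illustrative names and
values rather than the driver's own structures:

#include <limits.h>
#include <stdio.h>

struct vec {
	int irq;	/* IRQ number backing this VMD MSI-X vector */
	int nusers;	/* child interrupts already mapped onto it */
};

/* Pick the least used vector at or above first_vector. */
static int
pick_vector(struct vec *v, int count, int first_vector)
{
	int i, ibest = first_vector, best = INT_MAX;

	for (i = first_vector; i < count; i++) {
		if (v[i].nusers < best) {
			best = v[i].nusers;
			ibest = i;
		}
	}
	v[ibest].nusers++;
	return (ibest);
}

int
main(void)
{
	/* Four hypothetical VMD vectors; index 0 skipped, as with VECTOR_OFFSET. */
	struct vec v[4] = {{ 256, 0 }, { 257, 0 }, { 258, 0 }, { 259, 0 }};
	int child;

	for (child = 0; child < 3; child++)
		printf("child %d -> irq %d\n", child, v[pick_vector(v, 4, 1)].irq);
	return (0);
}

With more children than vectors the per-vector user counts stay balanced,
so vectors end up shared only when unavoidable.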

Successfully tested on a Dell XPS 13 laptop with a Core i7-1185G7 CPU (VMD
device ID 0x9a0b) and a single NVMe SSD, dual-booting with Windows 10.

Successfully tested on a Supermicro X11DPI-NT motherboard with Xeon(R)
Gold 6242R CPUs (VMD device ID 0x201d), simultaneously handling an NVMe
SSD on one PCIe port and a PLX bridge with 3 NVMe and 1 AHCI SSDs on
another.  Handles SSD hot-plug, except for Optane 905p drives, which for
some reason are not detected until a manual bus rescan (see the note
below).  Works with the IOMMU enabled: directly connected SSDs work, but
the ones behind the PLX fail without any errors from the IOMMU.
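
The manual bus rescan mentioned above would be issued with devctl(8); a
hypothetical invocation, assuming the VMD-created PCI bus attached as pci2:

	devctl rescan pci2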

MFC after:	2 weeks
Sponsored by:	iXsystems, Inc.
Differential Revision:	https://reviews.freebsd.org/D31762
Alexander Motin 2021-09-02 20:58:02 -04:00
parent 8c14d7da5b
commit 7af4475a6e
12 changed files with 391 additions and 665 deletions


@@ -842,6 +842,7 @@ _tpm.4= tpm.4
_urtw.4= urtw.4
_viawd.4= viawd.4
_vmci.4= vmci.4
_vmd.4= vmd.4
_vmx.4= vmx.4
_wbwd.4= wbwd.4
_wpi.4= wpi.4
@@ -860,7 +861,6 @@ _qlnxe.4= qlnxe.4
_sfxge.4= sfxge.4
_smartpqi.4= smartpqi.4
_sume.4= sume.4
_vmd.4= vmd.4
MLINKS+=qlxge.4 if_qlxge.4
MLINKS+=qlxgb.4 if_qlxgb.4


@@ -1,6 +1,7 @@
.\"-
.\" SPDX-License-Identifier: BSD-2-Clause-FreeBSD
.\"
.\" Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
.\" Copyright 2019 Cisco Systems, Inc.
.\"
.\" Redistribution and use in source and binary forms, with or without
@@ -26,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd October 9, 2019
.Dd August 31, 2021
.Dt VMD 4
.Os
.Sh NAME
@@ -37,7 +38,6 @@ To compile this driver into the kernel, place the following lines in your
kernel configuration file:
.Bd -ragged -offset -indent
.Cd "device vmd"
.Cd "device vmd_bus"
.Ed
.Pp
Alternatively, to load the driver as a module at boot time, place the following
@@ -47,10 +47,30 @@ line in
vmd_load="YES"
.Ed
.Sh DESCRIPTION
This driver attaches to Intel VMD devices as a new PCI domain and then
triggers a probe of PCI devices.
Intel VMD is used with Intel's VROC (Virtual RAID on chip) used with
NVME drives on Skylake SP servers.
This driver attaches to Intel VMD devices, representing them as PCI-to-PCI
bridges and providing access to child PCI devices via new PCI domains.
Intel VMD is used by Intel's VROC (Virtual RAID On CPU) to manage NVMe
drives.
.Sh LOADER TUNABLES
The following tunables are settable via
.Xr loader 8
or
.Xr sysctl 8 :
.Bl -tag -width indent
.It Va hw.vmd.max_msi
Limits the number of Message Signaled Interrupt (MSI) vectors allowed to
each child device.
VMD cannot distinguish MSI vectors of the same device, so there is no
benefit to having more than one unless a specific device driver requires it.
Defaults to 1.
.It Va hw.vmd.max_msix
Limits the number of Extended Message Signaled Interrupt (MSI-X) vectors
allowed to each child device.
VMD has a limited number of interrupt vectors to map child interrupts into,
so child devices/drivers need to be constrained to avoid or reduce sharing.
Defaults to 3.
.El
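For example, a hypothetical /boot/loader.conf fragment restricting every
device behind VMD to a single MSI-X vector (both tunables are CTLFLAG_RWTUN,
so they can also be changed at runtime via sysctl(8)):

	hw.vmd.max_msix="1"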
.Sh SEE ALSO
.Xr graid 8
.Sh HISTORY
@@ -58,6 +78,3 @@ The
.Nm
driver first appeared in
.Fx 13.0 .
.Sh BUGS
.Nm
is currently only available on amd64.


@@ -201,8 +201,7 @@ device nvme # base NVMe driver
device nvd # expose NVMe namespaces as disks, depends on nvme
# Intel Volume Management Device (VMD) support
device vmd # base VMD device
device vmd_bus # bus for VMD children
device vmd
# atkbdc0 controls both the keyboard and the PS/2 mouse
device atkbdc # AT keyboard controller


@@ -461,8 +461,7 @@ device nvd # expose NVMe namespaces as disks, depends on nvme
#
# Intel Volume Management Device (VMD) support
device vmd # base VMD device
device vmd_bus # bus for VMD children
device vmd
#
# PMC-Sierra SAS/SATA controller


@@ -360,8 +360,7 @@ dev/tpm/tpm_acpi.c optional tpm acpi
dev/tpm/tpm_isa.c optional tpm isa
dev/uart/uart_cpu_x86.c optional uart
dev/viawd/viawd.c optional viawd
dev/vmd/vmd.c optional vmd
dev/vmd/vmd_bus.c optional vmd_bus
dev/vmd/vmd.c optional vmd | vmd_bus
dev/wbwd/wbwd.c optional wbwd
dev/p2sb/p2sb.c optional p2sb pci
dev/p2sb/lewisburg_gpiocm.c optional lbggpiocm p2sb


@@ -79,6 +79,7 @@ dev/tpm/tpm_acpi.c optional tpm acpi
dev/tpm/tpm_isa.c optional tpm isa
dev/uart/uart_cpu_x86.c optional uart
dev/viawd/viawd.c optional viawd
dev/vmd/vmd.c optional vmd
dev/acpi_support/acpi_wmi_if.m standard
dev/wbwd/wbwd.c optional wbwd
i386/acpica/acpi_machdep.c optional acpi


@@ -1,6 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
* Copyright 2019 Cisco Systems, Inc.
* All rights reserved.
*
@@ -34,16 +35,18 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/intr_machdep.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
@@ -51,27 +54,19 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>
#define TASK_QUEUE_INTR 1
#include <dev/vmd/vmd.h>
#include "pcib_if.h"
#include "pci_if.h"
struct vmd_type {
u_int16_t vmd_vid;
u_int16_t vmd_did;
char *vmd_name;
int flags;
#define BUS_RESTRICT 1
#define BUS_RESTRICT 1
#define VECTOR_OFFSET 2
};
#define INTEL_VENDOR_ID 0x8086
#define INTEL_DEVICE_ID_201d 0x201d
#define INTEL_DEVICE_ID_28c0 0x28c0
#define INTEL_DEVICE_ID_467f 0x467f
#define INTEL_DEVICE_ID_4c3d 0x4c3d
#define INTEL_DEVICE_ID_9a0b 0x9a0b
#define VMD_CAP 0x40
#define VMD_BUS_RESTRICT 0x1
@@ -80,12 +75,32 @@ struct vmd_type {
#define VMD_LOCK 0x70
SYSCTL_NODE(_hw, OID_AUTO, vmd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Intel Volume Management Device tuning parameters");
/*
* All MSIs within a group share the same address, so VMD cannot distinguish
* them.  It makes no sense to use more than one per device unless required
* by some specific device drivers.
*/
static int vmd_max_msi = 1;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msi, CTLFLAG_RWTUN, &vmd_max_msi, 0,
"Maximum number of MSI vectors per device");
/*
* MSI-X vectors can use different addresses, but we have a limited number
* of MSI-X vectors we can route to, so use a conservative default to try
* to avoid sharing.
*/
static int vmd_max_msix = 3;
SYSCTL_INT(_hw_vmd, OID_AUTO, max_msix, CTLFLAG_RWTUN, &vmd_max_msix, 0,
"Maximum number of MSI-X vectors per device");
static struct vmd_type vmd_devs[] = {
{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_201d, "Intel Volume Management Device", 0 },
{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_28c0, "Intel Volume Management Device", BUS_RESTRICT },
{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_467f, "Intel Volume Management Device", BUS_RESTRICT },
{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_4c3d, "Intel Volume Management Device", BUS_RESTRICT },
{ INTEL_VENDOR_ID, INTEL_DEVICE_ID_9a0b, "Intel Volume Management Device", BUS_RESTRICT },
{ 0x8086, 0x201d, "Intel Volume Management Device", 0 },
{ 0x8086, 0x28c0, "Intel Volume Management Device", BUS_RESTRICT },
{ 0x8086, 0x467f, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
{ 0x8086, 0x4c3d, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
{ 0x8086, 0x9a0b, "Intel Volume Management Device", BUS_RESTRICT | VECTOR_OFFSET },
{ 0, 0, NULL, 0 }
};
@@ -95,73 +110,51 @@ vmd_probe(device_t dev)
struct vmd_type *t;
uint16_t vid, did;
t = vmd_devs;
vid = pci_get_vendor(dev);
did = pci_get_device(dev);
while (t->vmd_name != NULL) {
if (vid == t->vmd_vid &&
did == t->vmd_did) {
for (t = vmd_devs; t->vmd_name != NULL; t++) {
if (vid == t->vmd_vid && did == t->vmd_did) {
device_set_desc(dev, t->vmd_name);
return (BUS_PROBE_DEFAULT);
}
t++;
}
return (ENXIO);
}
static void
vmd_free(struct vmd_softc *sc)
{
struct vmd_irq *vi;
struct vmd_irq_user *u;
int i;
struct vmd_irq_handler *elm, *tmp;
if (sc->vmd_bus.rman.rm_end != 0)
rman_fini(&sc->vmd_bus.rman);
#ifdef TASK_QUEUE_INTR
if (sc->vmd_irq_tq != NULL) {
taskqueue_drain(sc->vmd_irq_tq, &sc->vmd_irq_task);
taskqueue_free(sc->vmd_irq_tq);
sc->vmd_irq_tq = NULL;
if (sc->psc.bus.rman.rm_end != 0)
rman_fini(&sc->psc.bus.rman);
if (sc->psc.mem.rman.rm_end != 0)
rman_fini(&sc->psc.mem.rman);
while ((u = LIST_FIRST(&sc->vmd_users)) != NULL) {
LIST_REMOVE(u, viu_link);
free(u, M_DEVBUF);
}
#endif
if (sc->vmd_irq != NULL) {
for (i = 0; i < sc->vmd_msix_count; i++) {
if (sc->vmd_irq[i].vmd_res != NULL) {
bus_teardown_intr(sc->vmd_dev,
sc->vmd_irq[i].vmd_res,
sc->vmd_irq[i].vmd_handle);
bus_release_resource(sc->vmd_dev, SYS_RES_IRQ,
sc->vmd_irq[i].vmd_rid,
sc->vmd_irq[i].vmd_res);
}
}
TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list ,vmd_link,
tmp) {
TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
free(elm, M_DEVBUF);
vi = &sc->vmd_irq[i];
if (vi->vi_res == NULL)
continue;
bus_teardown_intr(sc->psc.dev, vi->vi_res,
vi->vi_handle);
bus_release_resource(sc->psc.dev, SYS_RES_IRQ,
vi->vi_rid, vi->vi_res);
}
}
free(sc->vmd_irq, M_DEVBUF);
sc->vmd_irq = NULL;
pci_release_msi(sc->vmd_dev);
pci_release_msi(sc->psc.dev);
for (i = 0; i < VMD_MAX_BAR; i++) {
if (sc->vmd_regs_resource[i] != NULL)
bus_release_resource(sc->vmd_dev, SYS_RES_MEMORY,
sc->vmd_regs_rid[i],
sc->vmd_regs_resource[i]);
if (sc->vmd_regs_res[i] != NULL)
bus_release_resource(sc->psc.dev, SYS_RES_MEMORY,
sc->vmd_regs_rid[i], sc->vmd_regs_res[i]);
}
if (sc->vmd_io_resource)
bus_release_resource(device_get_parent(sc->vmd_dev),
SYS_RES_IOPORT, sc->vmd_io_rid, sc->vmd_io_resource);
#ifndef TASK_QUEUE_INTR
if (mtx_initialized(&sc->vmd_irq_lock)) {
mtx_destroy(&sc->vmd_irq_lock);
}
#endif
}
/* Hidden PCI Roots are hidden in BAR(0). */
@@ -169,17 +162,16 @@ vmd_free(struct vmd_softc *sc)
static uint32_t
vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
{
struct vmd_softc *sc;
bus_addr_t offset;
sc = device_get_softc(dev);
if (b < sc->vmd_bus_start)
if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
return (0xffffffff);
offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
switch(width) {
switch (width) {
case 4:
return (bus_space_read_4(sc->vmd_btag, sc->vmd_bhandle,
offset));
@@ -190,7 +182,7 @@ vmd_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
return (bus_space_read_1(sc->vmd_btag, sc->vmd_bhandle,
offset));
default:
KASSERT(1, ("Invalid width requested"));
__assert_unreachable();
return (0xffffffff);
}
}
@ -199,17 +191,16 @@ static void
vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
uint32_t val, int width)
{
struct vmd_softc *sc;
bus_addr_t offset;
sc = device_get_softc(dev);
if (b < sc->vmd_bus_start)
if (b < sc->vmd_bus_start || b > sc->vmd_bus_end)
return;
offset = ((b - sc->vmd_bus_start) << 20) + (s << 15) + (f << 12) + reg;
switch(width) {
switch (width) {
case 4:
return (bus_space_write_4(sc->vmd_btag, sc->vmd_bhandle,
offset, val));
@@ -220,269 +211,162 @@ vmd_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg,
return (bus_space_write_1(sc->vmd_btag, sc->vmd_bhandle,
offset, val));
default:
panic("Failed to specific width");
__assert_unreachable();
}
}
static uint32_t
vmd_pci_read_config(device_t dev, device_t child, int reg, int width)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
return vmd_read_config(dev, cfg->bus, cfg->slot, cfg->func, reg, width);
}
static void
vmd_pci_write_config(device_t dev, device_t child, int reg, uint32_t val,
int width)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
vmd_write_config(dev, cfg->bus, cfg->slot, cfg->func, reg, val, width);
}
static struct pci_devinfo *
vmd_alloc_devinfo(device_t dev)
{
struct pci_devinfo *dinfo;
dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
return (dinfo);
}
static void
static int
vmd_intr(void *arg)
{
struct vmd_irq *irq;
struct vmd_softc *sc;
#ifndef TASK_QUEUE_INTR
struct vmd_irq_handler *elm, *tmp_elm;
#endif
irq = (struct vmd_irq *)arg;
sc = irq->vmd_sc;
#ifdef TASK_QUEUE_INTR
taskqueue_enqueue(sc->vmd_irq_tq, &sc->vmd_irq_task);
#else
mtx_lock(&sc->vmd_irq_lock);
TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
(elm->vmd_intr)(elm->vmd_arg);
}
mtx_unlock(&sc->vmd_irq_lock);
#endif
/*
* We have nothing to do here, but we have to register some interrupt
* handler to make the PCI code set up and enable the MSI-X vector.
*/
return (FILTER_STRAY);
}
#ifdef TASK_QUEUE_INTR
static void
vmd_handle_irq(void *context, int pending)
{
struct vmd_irq_handler *elm, *tmp_elm;
struct vmd_softc *sc;
sc = context;
TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp_elm) {
(elm->vmd_intr)(elm->vmd_arg);
}
}
#endif
static int
vmd_attach(device_t dev)
{
struct vmd_softc *sc;
struct pcib_secbus *bus;
struct pcib_window *w;
struct vmd_type *t;
struct vmd_irq *vi;
uint16_t vid, did;
uint32_t bar;
int i, j, error;
int rid, sec_reg;
static int b;
static int s;
static int f;
int min_count = 1;
char buf[64];
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
sc->vmd_dev = dev;
b = s = f = 0;
sc->psc.dev = dev;
sc->psc.domain = PCI_DOMAINMAX - device_get_unit(dev);
pci_enable_busmaster(dev);
#ifdef TASK_QUEUE_INTR
sc->vmd_irq_tq = taskqueue_create_fast("vmd_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->vmd_irq_tq);
taskqueue_start_threads(&sc->vmd_irq_tq, 1, PI_DISK, "%s taskq",
device_get_nameunit(sc->vmd_dev));
TASK_INIT(&sc->vmd_irq_task, 0, vmd_handle_irq, sc);
#else
mtx_init(&sc->vmd_irq_lock, "VMD IRQ lock", NULL, MTX_DEF);
#endif
for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++ ) {
for (i = 0, j = 0; i < VMD_MAX_BAR; i++, j++) {
sc->vmd_regs_rid[i] = PCIR_BAR(j);
bar = pci_read_config(dev, PCIR_BAR(0), 4);
if (PCI_BAR_MEM(bar) && (bar & PCIM_BAR_MEM_TYPE) ==
PCIM_BAR_MEM_64)
j++;
if ((sc->vmd_regs_resource[i] = bus_alloc_resource_any(
sc->vmd_dev, SYS_RES_MEMORY, &sc->vmd_regs_rid[i],
RF_ACTIVE)) == NULL) {
if ((sc->vmd_regs_res[i] = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &sc->vmd_regs_rid[i], RF_ACTIVE)) == NULL) {
device_printf(dev, "Cannot allocate resources\n");
goto fail;
}
}
sc->vmd_io_rid = PCIR_IOBASEL_1;
sc->vmd_io_resource = bus_alloc_resource_any(
device_get_parent(sc->vmd_dev), SYS_RES_IOPORT, &sc->vmd_io_rid,
RF_ACTIVE);
if (sc->vmd_io_resource == NULL) {
device_printf(dev, "Cannot allocate IO\n");
goto fail;
}
sc->vmd_btag = rman_get_bustag(sc->vmd_regs_res[0]);
sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_res[0]);
sc->vmd_btag = rman_get_bustag(sc->vmd_regs_resource[0]);
sc->vmd_bhandle = rman_get_bushandle(sc->vmd_regs_resource[0]);
pci_write_config(dev, PCIR_PRIBUS_2,
pcib_get_bus(device_get_parent(dev)), 1);
t = vmd_devs;
vid = pci_get_vendor(dev);
did = pci_get_device(dev);
sc->vmd_bus_start = 0;
while (t->vmd_name != NULL) {
if (vid == t->vmd_vid &&
did == t->vmd_did) {
if (t->flags == BUS_RESTRICT) {
if (pci_read_config(dev, VMD_CAP, 2) &
VMD_BUS_RESTRICT)
switch (VMD_BUS_START(pci_read_config(
dev, VMD_CONFIG, 2))) {
case 1:
sc->vmd_bus_start = 128;
break;
case 2:
sc->vmd_bus_start = 224;
break;
case 3:
device_printf(dev,
"Unknown bug offset\n");
goto fail;
break;
}
}
}
t++;
for (t = vmd_devs; t->vmd_name != NULL; t++) {
if (vid == t->vmd_vid && did == t->vmd_did)
break;
}
device_printf(dev, "VMD bus starts at %d\n", sc->vmd_bus_start);
sc->vmd_bus_start = 0;
if ((t->flags & BUS_RESTRICT) &&
(pci_read_config(dev, VMD_CAP, 2) & VMD_BUS_RESTRICT)) {
switch (VMD_BUS_START(pci_read_config(dev, VMD_CONFIG, 2))) {
case 0:
sc->vmd_bus_start = 0;
break;
case 1:
sc->vmd_bus_start = 128;
break;
case 2:
sc->vmd_bus_start = 224;
break;
default:
device_printf(dev, "Unknown bus offset\n");
goto fail;
}
}
sc->vmd_bus_end = MIN(PCI_BUSMAX, sc->vmd_bus_start +
(rman_get_size(sc->vmd_regs_res[0]) >> 20) - 1);
sec_reg = PCIR_SECBUS_1;
bus = &sc->vmd_bus;
bus->sub_reg = PCIR_SUBBUS_1;
bus->sec = vmd_read_config(dev, b, s, f, sec_reg, 1);
bus->sub = vmd_read_config(dev, b, s, f, bus->sub_reg, 1);
bus = &sc->psc.bus;
bus->sec = sc->vmd_bus_start;
bus->sub = sc->vmd_bus_end;
bus->dev = dev;
bus->rman.rm_start = sc->vmd_bus_start;
bus->rman.rm_start = 0;
bus->rman.rm_end = PCI_BUSMAX;
bus->rman.rm_type = RMAN_ARRAY;
snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
bus->rman.rm_descr = strdup(buf, M_DEVBUF);
error = rman_init(&bus->rman);
if (error) {
device_printf(dev, "Failed to initialize %s bus number rman\n",
device_get_nameunit(dev));
device_printf(dev, "Failed to initialize bus rman\n");
bus->rman.rm_end = 0;
goto fail;
}
/*
* Allocate a bus range. This will return an existing bus range
* if one exists, or a new bus range if one does not.
*/
rid = 0;
bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
min_count, 0);
if (bus->res == NULL) {
/*
* Fall back to just allocating a range of a single bus
* number.
*/
bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
1, 0);
} else if (rman_get_size(bus->res) < min_count) {
/*
* Attempt to grow the existing range to satisfy the
* minimum desired count.
*/
(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
rman_get_start(bus->res), rman_get_start(bus->res) +
min_count - 1);
error = rman_manage_region(&bus->rman, sc->vmd_bus_start,
sc->vmd_bus_end);
if (error) {
device_printf(dev, "Failed to add resource to bus rman\n");
goto fail;
}
/*
* Add the initial resource to the rman.
*/
if (bus->res != NULL) {
error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
rman_get_end(bus->res));
if (error) {
device_printf(dev, "Failed to add resource to rman\n");
goto fail;
}
bus->sec = rman_get_start(bus->res);
bus->sub = rman_get_end(bus->res);
w = &sc->psc.mem;
w->rman.rm_type = RMAN_ARRAY;
snprintf(buf, sizeof(buf), "%s memory window", device_get_nameunit(dev));
w->rman.rm_descr = strdup(buf, M_DEVBUF);
error = rman_init(&w->rman);
if (error) {
device_printf(dev, "Failed to initialize memory rman\n");
w->rman.rm_end = 0;
goto fail;
}
error = rman_manage_region(&w->rman,
rman_get_start(sc->vmd_regs_res[1]),
rman_get_end(sc->vmd_regs_res[1]));
if (error) {
device_printf(dev, "Failed to add resource to memory rman\n");
goto fail;
}
error = rman_manage_region(&w->rman,
rman_get_start(sc->vmd_regs_res[2]) + 0x2000,
rman_get_end(sc->vmd_regs_res[2]));
if (error) {
device_printf(dev, "Failed to add resource to memory rman\n");
goto fail;
}
LIST_INIT(&sc->vmd_users);
sc->vmd_fist_vector = (t->flags & VECTOR_OFFSET) ? 1 : 0;
sc->vmd_msix_count = pci_msix_count(dev);
if (pci_alloc_msix(dev, &sc->vmd_msix_count) == 0) {
sc->vmd_irq = malloc(sizeof(struct vmd_irq) *
sc->vmd_msix_count,
M_DEVBUF, M_WAITOK | M_ZERO);
sc->vmd_msix_count, M_DEVBUF, M_WAITOK | M_ZERO);
for (i = 0; i < sc->vmd_msix_count; i++) {
sc->vmd_irq[i].vmd_rid = i + 1;
sc->vmd_irq[i].vmd_sc = sc;
sc->vmd_irq[i].vmd_instance = i;
sc->vmd_irq[i].vmd_res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &sc->vmd_irq[i].vmd_rid,
RF_ACTIVE);
if (sc->vmd_irq[i].vmd_res == NULL) {
device_printf(dev,"Failed to alloc irq\n");
vi = &sc->vmd_irq[i];
vi->vi_rid = i + 1;
vi->vi_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&vi->vi_rid, RF_ACTIVE | RF_SHAREABLE);
if (vi->vi_res == NULL) {
device_printf(dev, "Failed to allocate irq\n");
goto fail;
}
TAILQ_INIT(&sc->vmd_irq[i].vmd_list);
if (bus_setup_intr(dev, sc->vmd_irq[i].vmd_res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL, vmd_intr,
&sc->vmd_irq[i], &sc->vmd_irq[i].vmd_handle)) {
device_printf(sc->vmd_dev,
"Cannot set up interrupt\n");
sc->vmd_irq[i].vmd_res = NULL;
vi->vi_irq = rman_get_start(vi->vi_res);
if (bus_setup_intr(dev, vi->vi_res, INTR_TYPE_MISC |
INTR_MPSAFE, vmd_intr, NULL, vi, &vi->vi_handle)) {
device_printf(dev, "Can't set up interrupt\n");
bus_release_resource(dev, SYS_RES_IRQ,
vi->vi_rid, vi->vi_res);
vi->vi_res = NULL;
goto fail;
}
}
}
sc->vmd_child = device_add_child(dev, NULL, -1);
if (sc->vmd_child == NULL) {
device_printf(dev, "Failed to attach child\n");
goto fail;
}
sc->vmd_dma_tag = bus_get_dma_tag(dev);
error = device_probe_and_attach(sc->vmd_child);
if (error) {
device_printf(dev, "Failed to add probe child: %d\n", error);
(void)device_delete_child(dev, sc->vmd_child);
goto fail;
}
return (0);
sc->psc.child = device_add_child(dev, "pci", -1);
return (bus_generic_attach(dev));
fail:
vmd_free(sc);
@@ -492,150 +376,218 @@ vmd_attach(device_t dev)
static int
vmd_detach(device_t dev)
{
struct vmd_softc *sc;
int err;
struct vmd_softc *sc = device_get_softc(dev);
int error;
sc = device_get_softc(dev);
if (sc->vmd_child != NULL) {
err = bus_generic_detach(sc->vmd_child);
if (err)
return (err);
err = device_delete_child(dev, sc->vmd_child);
if (err)
return (err);
}
error = bus_generic_detach(dev);
if (error)
return (error);
error = device_delete_children(dev);
if (error)
return (error);
vmd_free(sc);
return (0);
}
/* Pass request to alloc an MSI-X message up to the parent bridge. */
static int
vmd_alloc_msix(device_t pcib, device_t dev, int *irq)
static bus_dma_tag_t
vmd_get_dma_tag(device_t dev, device_t child)
{
struct vmd_softc *sc = device_get_softc(pcib);
device_t bus;
int ret;
struct vmd_softc *sc = device_get_softc(dev);
if (sc->vmd_flags & PCIB_DISABLE_MSIX)
return (ENXIO);
bus = device_get_parent(pcib);
ret = PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq);
return (ret);
return (sc->vmd_dma_tag);
}
static struct resource *
vmd_alloc_resource(device_t dev, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
/* Start at max PCI vmd_domain and work down */
if (type == PCI_RES_BUS) {
return (pci_domain_alloc_bus(PCI_DOMAINMAX -
device_get_unit(dev), child, rid, start, end,
count, flags));
}
struct vmd_softc *sc = device_get_softc(dev);
struct resource *res;
return (pcib_alloc_resource(dev, child, type, rid, start, end,
count, flags));
switch (type) {
case SYS_RES_IRQ:
/* VMD hardware does not support legacy interrupts. */
if (*rid == 0)
return (NULL);
return (bus_generic_alloc_resource(dev, child, type, rid,
start, end, count, flags | RF_SHAREABLE));
case SYS_RES_MEMORY:
res = rman_reserve_resource(&sc->psc.mem.rman, start, end,
count, flags, child);
if (res == NULL)
return (NULL);
if (bootverbose)
device_printf(dev,
"allocated memory range (%#jx-%#jx) for rid %d of %s\n",
rman_get_start(res), rman_get_end(res), *rid,
pcib_child_name(child));
break;
case PCI_RES_BUS:
res = rman_reserve_resource(&sc->psc.bus.rman, start, end,
count, flags, child);
if (res == NULL)
return (NULL);
if (bootverbose)
device_printf(dev,
"allocated bus range (%ju-%ju) for rid %d of %s\n",
rman_get_start(res), rman_get_end(res), *rid,
pcib_child_name(child));
break;
default:
/* VMD hardware does not support I/O ports. */
return (NULL);
}
rman_set_rid(res, *rid);
return (res);
}
static int
vmd_adjust_resource(device_t dev, device_t child, int type,
struct resource *r, rman_res_t start, rman_res_t end)
{
struct resource *res = r;
if (type == PCI_RES_BUS)
return (pci_domain_adjust_bus(PCI_DOMAINMAX -
device_get_unit(dev), child, res, start, end));
return (pcib_adjust_resource(dev, child, type, res, start, end));
if (type == SYS_RES_IRQ) {
return (bus_generic_adjust_resource(dev, child, type, r,
start, end));
}
return (rman_adjust_resource(r, start, end));
}
static int
vmd_release_resource(device_t dev, device_t child, int type, int rid,
struct resource *r)
{
if (type == PCI_RES_BUS)
return (pci_domain_release_bus(PCI_DOMAINMAX -
device_get_unit(dev), child, rid, r));
return (pcib_release_resource(dev, child, type, rid, r));
if (type == SYS_RES_IRQ) {
return (bus_generic_release_resource(dev, child, type, rid,
r));
}
return (rman_release_resource(r));
}
static int
vmd_shutdown(device_t dev)
vmd_route_interrupt(device_t dev, device_t child, int pin)
{
/* VMD hardware does not support legacy interrupts. */
return (PCI_INVALID_IRQ);
}
static int
vmd_alloc_msi(device_t dev, device_t child, int count, int maxcount,
int *irqs)
{
struct vmd_softc *sc = device_get_softc(dev);
struct vmd_irq_user *u;
int i, ibest = 0, best = INT_MAX;
if (count > vmd_max_msi)
return (ENOSPC);
LIST_FOREACH(u, &sc->vmd_users, viu_link) {
if (u->viu_child == child)
return (EBUSY);
}
for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
if (best > sc->vmd_irq[i].vi_nusers) {
best = sc->vmd_irq[i].vi_nusers;
ibest = i;
}
}
u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
u->viu_child = child;
u->viu_vector = ibest;
LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
sc->vmd_irq[ibest].vi_nusers += count;
for (i = 0; i < count; i++)
irqs[i] = sc->vmd_irq[ibest].vi_irq;
return (0);
}
static int
vmd_pcib_route_interrupt(device_t pcib, device_t dev, int pin)
vmd_release_msi(device_t dev, device_t child, int count, int *irqs)
{
return (pcib_route_interrupt(pcib, dev, pin));
struct vmd_softc *sc = device_get_softc(dev);
struct vmd_irq_user *u;
LIST_FOREACH(u, &sc->vmd_users, viu_link) {
if (u->viu_child == child) {
sc->vmd_irq[u->viu_vector].vi_nusers -= count;
LIST_REMOVE(u, viu_link);
free(u, M_DEVBUF);
return (0);
}
}
return (EINVAL);
}
static int
vmd_pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount,
int *irqs)
vmd_alloc_msix(device_t dev, device_t child, int *irq)
{
return (pcib_alloc_msi(pcib, dev, count, maxcount, irqs));
}
static int
vmd_pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
{
return (pcib_release_msi(pcib, dev, count, irqs));
}
static int
vmd_pcib_release_msix(device_t pcib, device_t dev, int irq) {
return pcib_release_msix(pcib, dev, irq);
}
static int
vmd_setup_intr(device_t dev, device_t child, struct resource *irq,
int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
void **cookiep)
{
struct vmd_irq_handler *elm;
struct vmd_softc *sc;
int i;
sc = device_get_softc(dev);
/*
* There appears to be no steering of VMD interrupts from device
* to VMD interrupt
*/
struct vmd_softc *sc = device_get_softc(dev);
struct vmd_irq_user *u;
int i, ibest = 0, best = INT_MAX;
i = 0;
elm = malloc(sizeof(*elm), M_DEVBUF, M_NOWAIT|M_ZERO);
elm->vmd_child = child;
elm->vmd_intr = intr;
elm->vmd_rid = rman_get_rid(irq);
elm->vmd_arg = arg;
TAILQ_INSERT_TAIL(&sc->vmd_irq[i].vmd_list, elm, vmd_link);
LIST_FOREACH(u, &sc->vmd_users, viu_link) {
if (u->viu_child == child)
i++;
}
if (i >= vmd_max_msix)
return (ENOSPC);
return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
arg, cookiep));
}
static int
vmd_teardown_intr(device_t dev, device_t child, struct resource *irq,
void *cookie)
{
struct vmd_irq_handler *elm, *tmp;;
struct vmd_softc *sc;
sc = device_get_softc(dev);
TAILQ_FOREACH_SAFE(elm, &sc->vmd_irq[0].vmd_list, vmd_link, tmp) {
if (elm->vmd_child == child &&
elm->vmd_rid == rman_get_rid(irq)) {
TAILQ_REMOVE(&sc->vmd_irq[0].vmd_list, elm, vmd_link);
free(elm, M_DEVBUF);
for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
if (best > sc->vmd_irq[i].vi_nusers) {
best = sc->vmd_irq[i].vi_nusers;
ibest = i;
}
}
return (bus_generic_teardown_intr(dev, child, irq, cookie));
u = malloc(sizeof(*u), M_DEVBUF, M_WAITOK | M_ZERO);
u->viu_child = child;
u->viu_vector = ibest;
LIST_INSERT_HEAD(&sc->vmd_users, u, viu_link);
sc->vmd_irq[ibest].vi_nusers++;
*irq = sc->vmd_irq[ibest].vi_irq;
return (0);
}
static int
vmd_release_msix(device_t dev, device_t child, int irq)
{
struct vmd_softc *sc = device_get_softc(dev);
struct vmd_irq_user *u;
LIST_FOREACH(u, &sc->vmd_users, viu_link) {
if (u->viu_child == child &&
sc->vmd_irq[u->viu_vector].vi_irq == irq) {
sc->vmd_irq[u->viu_vector].vi_nusers--;
LIST_REMOVE(u, viu_link);
free(u, M_DEVBUF);
return (0);
}
}
return (EINVAL);
}
static int
vmd_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data)
{
struct vmd_softc *sc = device_get_softc(dev);
int i;
for (i = sc->vmd_fist_vector; i < sc->vmd_msix_count; i++) {
if (sc->vmd_irq[i].vi_irq == irq)
break;
}
if (i >= sc->vmd_msix_count)
return (EINVAL);
*addr = MSI_INTEL_ADDR_BASE | (i << 12);
*data = 0;
return (0);
}
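/*
 * For illustration of vmd_map_msi() above: MSI_INTEL_ADDR_BASE is the x86
 * MSI address window (0xfee00000), and the chosen VMD vector index is
 * encoded into address bits 12 and up.  A child interrupt assigned to
 * vector index 2 would therefore be programmed with address 0xfee02000 and
 * data 0, presumably letting the VMD hardware steer it through the bridge's
 * own MSI-X vector 2.
 */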
static device_method_t vmd_pci_methods[] = {
@@ -643,9 +595,12 @@ static device_method_t vmd_pci_methods[] = {
DEVMETHOD(device_probe, vmd_probe),
DEVMETHOD(device_attach, vmd_attach),
DEVMETHOD(device_detach, vmd_detach),
DEVMETHOD(device_shutdown, vmd_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
/* Bus interface */
DEVMETHOD(bus_get_dma_tag, vmd_get_dma_tag),
DEVMETHOD(bus_read_ivar, pcib_read_ivar),
DEVMETHOD(bus_write_ivar, pcib_write_ivar),
DEVMETHOD(bus_alloc_resource, vmd_alloc_resource),
@@ -653,32 +608,27 @@ static device_method_t vmd_pci_methods[] = {
DEVMETHOD(bus_release_resource, vmd_release_resource),
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
DEVMETHOD(bus_setup_intr, vmd_setup_intr),
DEVMETHOD(bus_teardown_intr, vmd_teardown_intr),
/* pci interface */
DEVMETHOD(pci_read_config, vmd_pci_read_config),
DEVMETHOD(pci_write_config, vmd_pci_write_config),
DEVMETHOD(pci_alloc_devinfo, vmd_alloc_devinfo),
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* pcib interface */
DEVMETHOD(pcib_maxslots, pcib_maxslots),
DEVMETHOD(pcib_read_config, vmd_read_config),
DEVMETHOD(pcib_write_config, vmd_write_config),
DEVMETHOD(pcib_route_interrupt, vmd_pcib_route_interrupt),
DEVMETHOD(pcib_alloc_msi, vmd_pcib_alloc_msi),
DEVMETHOD(pcib_release_msi, vmd_pcib_release_msi),
DEVMETHOD(pcib_route_interrupt, vmd_route_interrupt),
DEVMETHOD(pcib_alloc_msi, vmd_alloc_msi),
DEVMETHOD(pcib_release_msi, vmd_release_msi),
DEVMETHOD(pcib_alloc_msix, vmd_alloc_msix),
DEVMETHOD(pcib_release_msix, vmd_pcib_release_msix),
DEVMETHOD(pcib_map_msi, pcib_map_msi),
DEVMETHOD(pcib_release_msix, vmd_release_msix),
DEVMETHOD(pcib_map_msi, vmd_map_msi),
DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),
DEVMETHOD_END
};
static devclass_t vmd_devclass;
static devclass_t pcib_devclass;
DEFINE_CLASS_0(vmd, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, vmd_devclass, NULL, NULL);
DEFINE_CLASS_0(pcib, vmd_pci_driver, vmd_pci_methods, sizeof(struct vmd_softc));
DRIVER_MODULE(vmd, pci, vmd_pci_driver, pcib_devclass, NULL, NULL);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, vmd,
vmd_devs, nitems(vmd_devs) - 1);
MODULE_DEPEND(vmd, vmd_bus, 1, 1, 1);


@@ -1,6 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2021 Alexander Motin <mav@FreeBSD.org>
* Copyright 2019 Cisco Systems, Inc.
* All rights reserved.
*
@@ -32,59 +33,36 @@
#ifndef __VMD_PRIVATE_H__
#define __VMD_PRIVATE_H__
struct vmd_irq_handler {
TAILQ_ENTRY(vmd_irq_handler) vmd_link;
device_t vmd_child;
driver_intr_t *vmd_intr;
void *vmd_arg;
int vmd_rid;
#include <dev/pci/pcib_private.h>
struct vmd_irq_user {
LIST_ENTRY(vmd_irq_user) viu_link;
device_t viu_child;
int viu_vector;
};
struct vmd_irq {
struct resource *vmd_res;
int vmd_rid;
void *vmd_handle;
struct vmd_softc *vmd_sc;
int vmd_instance;
TAILQ_HEAD(,vmd_irq_handler) vmd_list;
struct resource *vi_res;
int vi_rid;
int vi_irq;
void *vi_handle;
int vi_nusers;
};
/*
* VMD specific data.
*/
struct vmd_softc
{
device_t vmd_dev;
device_t vmd_child;
uint32_t vmd_flags; /* flags */
#define PCIB_SUBTRACTIVE 0x1
#define PCIB_DISABLE_MSI 0x2
#define PCIB_DISABLE_MSIX 0x4
#define PCIB_ENABLE_ARI 0x8
#define PCIB_HOTPLUG 0x10
#define PCIB_HOTPLUG_CMD_PENDING 0x20
#define PCIB_DETACH_PENDING 0x40
#define PCIB_DETACHING 0x80
u_int vmd_domain; /* domain number */
struct pcib_secbus vmd_bus; /* secondary bus numbers */
struct vmd_softc {
struct pcib_softc psc;
#define VMD_MAX_BAR 3
struct resource *vmd_regs_resource[VMD_MAX_BAR];
int vmd_regs_rid[VMD_MAX_BAR];
bus_space_handle_t vmd_bhandle;
bus_space_tag_t vmd_btag;
int vmd_io_rid;
struct resource *vmd_io_resource;
void *vmd_intr;
struct vmd_irq *vmd_irq;
int vmd_msix_count;
uint8_t vmd_bus_start;
#ifdef TASK_QUEUE_INTR
struct taskqueue *vmd_irq_tq;
struct task vmd_irq_task;
#else
struct mtx vmd_irq_lock;
#endif
#define VMD_MAX_BAR 3
int vmd_regs_rid[VMD_MAX_BAR];
struct resource *vmd_regs_res[VMD_MAX_BAR];
bus_space_handle_t vmd_bhandle;
bus_space_tag_t vmd_btag;
struct vmd_irq *vmd_irq;
LIST_HEAD(,vmd_irq_user) vmd_users;
int vmd_fist_vector;
int vmd_msix_count;
uint8_t vmd_bus_start;
uint8_t vmd_bus_end;
};
#endif
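The softc above embeds struct pcib_softc as its first member (psc),
presumably so that the generic pcib_* methods reused in vmd_pci_methods can
treat the vmd softc as a pcib softc.  A minimal sketch of that first-member
embedding idiom, with hypothetical stand-in types rather than the real ones:

#include <stdio.h>

struct base {				/* stands in for struct pcib_softc */
	int	domain;
};

struct derived {			/* stands in for struct vmd_softc */
	struct base	b;		/* must come first */
	int		extra;
};

/* Generic code that only knows about struct base, like pcib_read_ivar(). */
static int
get_domain(void *softc)
{
	return (((struct base *)softc)->domain);
}

int
main(void)
{
	struct derived d = { .b = { .domain = 65535 }, .extra = 1 };

	/* Valid because a pointer to d also points at d.b. */
	printf("%d\n", get_domain(&d));
	return (0);
}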


@@ -1,220 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright 2019 Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>
#include <dev/vmd/vmd.h>
#include "pcib_if.h"
#include "pci_if.h"
static int
vmd_bus_probe(device_t dev)
{
device_set_desc(dev, "VMD bus");
return (-1000);
}
/* PCI interface. */
static int
vmd_bus_attach(device_t dev)
{
struct vmd_softc *sc;
struct pci_devinfo *dinfo;
rman_res_t start, end;
int b, s, f;
int found;
sc = device_get_softc(device_get_parent(dev));
/*
* Start at max PCI vmd_domain and work down. Only VMD
* starting bus is connect to VMD device directly. Scan
* all slots and function connected to starting bus.
*/
b = sc->vmd_bus_start;
found = 0;
for (s = 0; s < PCI_SLOTMAX; s++) {
for (f = 0; f < PCI_FUNCMAX; f++) {
dinfo = pci_read_device(device_get_parent(dev), dev,
PCI_DOMAINMAX - device_get_unit(
device_get_parent(dev)), b, s, f);
if (dinfo != NULL) {
found = 1;
pci_add_child(dev, dinfo);
start = rman_get_start(sc->vmd_regs_resource[1]);
end = rman_get_end(sc->vmd_regs_resource[1]);
resource_list_add_next(&dinfo->resources,
SYS_RES_MEMORY, start, end, end - start + 1);
start = rman_get_start(sc->vmd_io_resource);
end = rman_get_end(sc->vmd_io_resource);
resource_list_add_next(&dinfo->resources,
SYS_RES_IOPORT, start, end, end - start + 1);
}
}
}
if (found) {
bus_generic_attach(dev);
}
return (0);
}
static int
vmd_bus_detach(device_t dev)
{
struct vmd_softc *sc;
struct pci_devinfo *dinfo;
int b, s, f;
device_delete_children(dev);
sc = device_get_softc(device_get_parent(dev));
b = sc->vmd_bus_start;
for (s = 0; s < PCI_SLOTMAX; s++) {
for (f = 0; f < PCI_FUNCMAX; f++) {
dinfo = pci_read_device(device_get_parent(dev), dev,
PCI_DOMAINMAX - device_get_unit(
device_get_parent(dev)), b, s, f);
if (dinfo != NULL)
resource_list_free(&dinfo->resources);
}
}
return (0);
}
static int
vmd_bus_adjust_resource(device_t dev, device_t child, int type,
struct resource *r, rman_res_t start, rman_res_t end)
{
struct resource *res = r;
if (type == SYS_RES_MEMORY) {
/* VMD device controls this */
return (0);
}
return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}
static int
vmd_bus_release_resource(device_t dev, device_t child, int type, int rid,
struct resource *r)
{
if (type == SYS_RES_MEMORY) {
/* VMD device controls this */
return (0);
}
return (pci_release_resource(dev, child, type, rid, r));
}
static struct resource *
vmd_bus_alloc_resource(device_t dev, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
struct vmd_softc *sc;
sc = device_get_softc(device_get_parent(dev));
if (type == SYS_RES_MEMORY) {
/* remap to VMD resources */
if (*rid == PCIR_MEMBASE_1) {
return (sc->vmd_regs_resource[1]);
} else if (*rid == PCIR_PMBASEL_1) {
return (sc->vmd_regs_resource[2]);
} else {
return (sc->vmd_regs_resource[2]);
}
}
return (pci_alloc_resource(dev, child, type, rid, start, end,
count, flags));
}
static int
vmd_bus_shutdown(device_t dev)
{
return (0);
}
static device_method_t vmd_bus_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, vmd_bus_probe),
DEVMETHOD(device_attach, vmd_bus_attach),
DEVMETHOD(device_detach, vmd_bus_detach),
DEVMETHOD(device_shutdown, vmd_bus_shutdown),
/* Bus interface */
DEVMETHOD(bus_alloc_resource, vmd_bus_alloc_resource),
DEVMETHOD(bus_adjust_resource, vmd_bus_adjust_resource),
DEVMETHOD(bus_release_resource, vmd_bus_release_resource),
/* pci interface */
DEVMETHOD(pci_read_config, pci_read_config_method),
DEVMETHOD(pci_write_config, pci_write_config_method),
DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method),
DEVMETHOD_END
};
static devclass_t vmd_bus_devclass;
DEFINE_CLASS_1(pci, vmd_bus_pci_driver, vmd_bus_pci_methods,
sizeof(struct pci_softc), pci_driver);
DRIVER_MODULE(pci, vmd, vmd_bus_pci_driver, vmd_bus_devclass, NULL, NULL);
MODULE_VERSION(vmd_bus, 1);


@@ -678,6 +678,10 @@ options ISCI_LOGGING # enable debugging in isci HAL
device nvme # base NVMe driver
device nvd # expose NVMe namespaces as disks, depends on nvme
#
# Intel Volume Management Device (VMD) support
device vmd
#
# PMC-Sierra SAS/SATA controller
device pmspcv


@@ -730,6 +730,7 @@ _tpm= tpm
_twa= twa
_vesa= vesa
_viawd= viawd
_vmd= vmd
_wpi= wpi
.if ${MK_SOURCELESS_UCODE} != "no"
_wpifw= wpifw
@@ -748,7 +749,6 @@ _pms= pms
_qlxge= qlxge
_qlxgb= qlxgb
_sume= sume
_vmd= vmd
.if ${MK_SOURCELESS_UCODE} != "no"
_qlxgbe= qlxgbe
_qlnx= qlnx


@@ -5,7 +5,6 @@
KMOD= vmd
SRCS= \
vmd_bus.c \
vmd.c \
bus_if.h \
device_if.h \