
control some debugging messages with dev.netmap.verbose

add infrastructure to adapt to changes in the number of queues
and buffers at runtime
Luigi Rizzo 2013-01-23 03:51:47 +00:00
parent 01c039a19c
commit ae10d1afee
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=245835
3 changed files with 123 additions and 54 deletions
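For context, the knob named in the commit message is a plain sysctl. Its declaration in netmap.c follows the usual FreeBSD pattern, roughly like the sketch below (exact placement and description strings may differ):

    #include <sys/sysctl.h>

    int netmap_verbose;     /* read by the debug printouts gated in this commit */
    SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
    SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
        CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");

With the module loaded, `sysctl dev.netmap.verbose=1` re-enables at runtime the chatty messages that this commit demotes to verbose-only.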

sys/dev/netmap/netmap.c

@@ -275,6 +275,51 @@ nm_find_bridge(const char *name)
 }
 #endif /* NM_BRIDGE */
 
+/*
+ * Fetch configuration from the device, to cope with dynamic
+ * reconfigurations after loading the module.
+ */
+static int
+netmap_update_config(struct netmap_adapter *na)
+{
+    struct ifnet *ifp = na->ifp;
+    u_int txr, txd, rxr, rxd;
+
+    txr = txd = rxr = rxd = 0;
+    if (na->nm_config) {
+        na->nm_config(ifp, &txr, &txd, &rxr, &rxd);
+    } else {
+        /* take whatever we had at init time */
+        txr = na->num_tx_rings;
+        txd = na->num_tx_desc;
+        rxr = na->num_rx_rings;
+        rxd = na->num_rx_desc;
+    }
+
+    if (na->num_tx_rings == txr && na->num_tx_desc == txd &&
+        na->num_rx_rings == rxr && na->num_rx_desc == rxd)
+        return 0; /* nothing changed */
+    if (netmap_verbose || na->refcount > 0) {
+        D("stored config %s: txring %d x %d, rxring %d x %d",
+            ifp->if_xname,
+            na->num_tx_rings, na->num_tx_desc,
+            na->num_rx_rings, na->num_rx_desc);
+        D("new config %s: txring %d x %d, rxring %d x %d",
+            ifp->if_xname, txr, txd, rxr, rxd);
+    }
+    if (na->refcount == 0) {
+        D("configuration changed (but fine)");
+        na->num_tx_rings = txr;
+        na->num_tx_desc = txd;
+        na->num_rx_rings = rxr;
+        na->num_rx_desc = rxd;
+        return 0;
+    }
+    D("configuration changed while active, this is bad...");
+    return 1;
+}
+
 /*------------- memory allocator -----------------*/
 #ifdef NETMAP_MEM2
 #include "netmap_mem2.c"
@@ -351,7 +396,8 @@ netmap_dtor_locked(void *data)
     if (na->refcount <= 0) { /* last instance */
         u_int i, j, lim;
-        D("deleting last netmap instance for %s", ifp->if_xname);
+        if (netmap_verbose)
+            D("deleting last instance for %s", ifp->if_xname);
         /*
          * there is a race here with *_netmap_task() and
          * netmap_poll(), which don't run under NETMAP_REG_LOCK.
@@ -482,7 +528,8 @@ static int
 netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
     vm_ooffset_t foff, struct ucred *cred, u_short *color)
 {
-    D("first mmap for %p", handle);
+    if (netmap_verbose)
+        D("first mmap for %p", handle);
     return saved_cdev_pager_ops.cdev_pg_ctor(handle,
         size, prot, foff, cred, color);
 }
@@ -491,7 +538,7 @@ static void
 netmap_dev_pager_dtor(void *handle)
 {
     saved_cdev_pager_ops.cdev_pg_dtor(handle);
-    D("ready to release memory for %p", handle);
+    ND("ready to release memory for %p", handle);
 }
@@ -507,7 +554,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
 {
     vm_object_t obj;

-    D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
+    ND("cdev %p foff %jd size %jd objp %p prot %d", cdev,
         (intmax_t )*foff, (intmax_t )objsize, objp, prot);
     obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
         curthread->td_ucred);
@@ -515,7 +562,7 @@ netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
     if (obj == NULL)
         return EINVAL;
     if (saved_cdev_pager_ops.cdev_pg_fault == NULL) {
-        D("initialize cdev_pager_ops");
+        ND("initialize cdev_pager_ops");
         saved_cdev_pager_ops = *(obj->un_pager.devp.ops);
         netmap_cdev_pager_ops.cdev_pg_fault =
             saved_cdev_pager_ops.cdev_pg_fault;
@@ -572,7 +619,9 @@ netmap_mmap(__unused struct cdev *dev,
 static int
 netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
 {
-    D("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td);
+    if (netmap_verbose)
+        D("dev %p fflag 0x%x devtype %d td %p",
+            dev, fflag, devtype, td);
     return 0;
 }
@@ -877,6 +926,7 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
     priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
     if (need_lock)
         na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
+    if (netmap_verbose) {
     if (ringid & NETMAP_SW_RING)
         D("ringid %s set to SW RING", ifp->if_xname);
     else if (ringid & NETMAP_HW_RING)
@@ -884,6 +934,7 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
             priv->np_qfirst);
     else
         D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
+    }
     return 0;
 }
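For reference, the ringid parsed above arrives from userspace in nmr->nr_ringid via the NIOCREGIF ioctl. A sketch of a client binding a single hardware ring, with the interface name and ring index chosen as examples and error handling omitted:

    #include <fcntl.h>
    #include <string.h>
    #include <strings.h>
    #include <sys/ioctl.h>
    #include <net/netmap.h>
    #include <net/netmap_user.h>

    /* sketch: bind only hardware ring 2 of em0 */
    int
    open_hw_ring(void)
    {
        struct nmreq req;
        int fd = open("/dev/netmap", O_RDWR);

        bzero(&req, sizeof(req));
        strlcpy(req.nr_name, "em0", sizeof(req.nr_name)); /* example NIC */
        req.nr_version = NETMAP_API;
        req.nr_ringid = NETMAP_HW_RING | 2;  /* hardware ring 2 only */
        ioctl(fd, NIOCREGIF, &req);
        return fd;
    }

Passing neither NETMAP_HW_RING nor NETMAP_SW_RING binds all hardware rings, the last branch in the code above.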
@@ -965,6 +1016,7 @@ netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
         if (error)
             break;
         na = NA(ifp); /* retrieve netmap_adapter */
+        netmap_update_config(na);
         nmr->nr_rx_rings = na->num_rx_rings;
         nmr->nr_tx_rings = na->num_tx_rings;
         nmr->nr_rx_slots = na->num_rx_desc;
@@ -1014,6 +1066,8 @@ netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
             break;
         }
+        /* ring configuration may have changed, fetch from the card */
+        netmap_update_config(na);
         priv->np_ifp = ifp; /* store the reference */
         error = netmap_set_ringid(priv, nmr->nr_ringid);
         if (error)
@@ -1444,46 +1498,28 @@ netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
  * setups.
  */
 int
-netmap_attach(struct netmap_adapter *na, int num_queues)
+netmap_attach(struct netmap_adapter *arg, int num_queues)
 {
-    int n, size;
-    void *buf;
-    struct ifnet *ifp = na->ifp;
+    struct netmap_adapter *na = NULL;
+    struct ifnet *ifp = arg ? arg->ifp : NULL;
 
-    if (ifp == NULL) {
-        D("ifp not set, giving up");
-        return EINVAL;
-    }
-    /* clear other fields ? */
-    na->refcount = 0;
+    if (arg == NULL || ifp == NULL)
+        goto fail;
+    na = malloc(sizeof(*na), M_DEVBUF, M_NOWAIT | M_ZERO);
+    if (na == NULL)
+        goto fail;
+    WNA(ifp) = na;
+    *na = *arg; /* copy everything, trust the driver to not pass junk */
+    NETMAP_SET_CAPABLE(ifp);
     if (na->num_tx_rings == 0)
         na->num_tx_rings = num_queues;
     na->num_rx_rings = num_queues;
-    /* on each direction we have N+1 resources
-     * 0..n-1 are the hardware rings
-     * n is the ring attached to the stack.
-     */
-    n = na->num_rx_rings + na->num_tx_rings + 2;
-    size = sizeof(*na) + n * sizeof(struct netmap_kring);
-    buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
-    if (buf) {
-        WNA(ifp) = buf;
-        na->tx_rings = (void *)((char *)buf + sizeof(*na));
-        na->rx_rings = na->tx_rings + na->num_tx_rings + 1;
-        bcopy(na, buf, sizeof(*na));
-        NETMAP_SET_CAPABLE(ifp);
-        na = buf;
-        /* Core lock initialized here.  Others are initialized after
-         * netmap_if_new.
-         */
-        mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK,
-            MTX_DEF);
-        if (na->nm_lock == NULL) {
-            ND("using default locks for %s", ifp->if_xname);
-            na->nm_lock = netmap_lock_wrapper;
-        }
-    }
+    na->refcount = na->na_single = na->na_multi = 0;
+    /* Core lock initialized here, others after netmap_if_new. */
+    mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, MTX_DEF);
+    if (na->nm_lock == NULL) {
+        ND("using default locks for %s", ifp->if_xname);
+        na->nm_lock = netmap_lock_wrapper;
+    }
 #ifdef linux
     if (ifp->netdev_ops) {
@@ -1493,9 +1529,12 @@ netmap_attach(struct netmap_adapter *na, int num_queues)
     }
     na->nm_ndo.ndo_start_xmit = linux_netmap_start;
 #endif
-    D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);
-
-    return (buf ? 0 : ENOMEM);
+    D("success for %s", ifp->if_xname);
+    return 0;
+
+fail:
+    D("fail, arg %p ifp %p na %p", arg, ifp, na);
+    return (na ? EINVAL : ENOMEM);
 }
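One consequence of the rewrite above: netmap_attach() now copies *arg into its own allocation, so a driver may pass a throwaway, stack-allocated descriptor. A sketch of the driver side in the style of the in-tree Intel drivers of this era, with all foo_* names hypothetical:

    static void
    foo_netmap_attach(struct foo_softc *sc)
    {
        struct netmap_adapter na;  /* on the stack; netmap_attach() copies it */

        bzero(&na, sizeof(na));
        na.ifp = sc->ifp;
        na.num_tx_desc = sc->num_tx_desc;
        na.num_rx_desc = sc->num_rx_desc;
        na.nm_register = foo_netmap_reg;    /* hypothetical callbacks */
        na.nm_txsync = foo_netmap_txsync;
        na.nm_rxsync = foo_netmap_rxsync;
        na.nm_config = foo_netmap_config;   /* the new, optional hook */
        netmap_attach(&na, sc->num_queues);
    }

Since the krings are no longer co-allocated with the adapter, they are created lazily in netmap_if_new() (see the netmap_mem2.c changes below) and released in netmap_free_rings(), with netmap_detach() freeing any leftovers.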
@@ -1513,6 +1552,10 @@ netmap_detach(struct ifnet *ifp)
     mtx_destroy(&na->core_lock);
 
+    if (na->tx_rings) { /* XXX should not happen */
+        D("freeing leftover tx_rings");
+        free(na->tx_rings, M_DEVBUF);
+    }
     bzero(na, sizeof(*na));
     WNA(ifp) = NULL;
     free(na, M_DEVBUF);

sys/dev/netmap/netmap_kern.h

@@ -203,6 +203,9 @@ struct netmap_adapter {
     void (*nm_lock)(struct ifnet *, int what, u_int ringid);
     int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
     int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
+    /* return configuration information */
+    int (*nm_config)(struct ifnet *, u_int *txr, u_int *txd,
+        u_int *rxr, u_int *rxd);
 
     int bdg_port;
 #ifdef linux
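The D() and ND() macros swapped throughout this commit are defined earlier in this same header: D() prints a timestamped message, ND() compiles to nothing, so flipping one to the other enables or disables an individual message at build time, while the new netmap_verbose tests gate messages at runtime instead. A rough reconstruction of the era's definitions, details may differ:

    #define ND(format, ...)   /* "no debug": message discarded at compile time */
    #define D(format, ...)                                          \
        do {                                                        \
            struct timeval __xxts;                                  \
            microtime(&__xxts);                                     \
            printf("%03d.%06d %s [%d] " format "\n",                \
                (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,     \
                __FUNCTION__, __LINE__, ##__VA_ARGS__);             \
        } while (0)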

sys/dev/netmap/netmap_mem2.c

@@ -388,7 +388,7 @@ netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
         netmap_obj_free(p, j);
         return;
     }
-    ND("address %p is not contained inside any cluster (%s)",
+    D("address %p is not contained inside any cluster (%s)",
         vaddr, p->name);
 }
@@ -559,8 +559,9 @@ netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int obj
     i = (clustsize & (PAGE_SIZE - 1));
     if (i)
         clustsize += PAGE_SIZE - i;
-    D("objsize %d clustsize %d objects %d",
-        objsize, clustsize, clustentries);
+    if (netmap_verbose)
+        D("objsize %d clustsize %d objects %d",
+            objsize, clustsize, clustentries);
 
     /*
      * The number of clusters is n = ceil(objtotal/clustentries)
@@ -649,9 +650,10 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
         }
     }
     p->bitmap[0] = ~3; /* objs 0 and 1 is always busy */
-    D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
-        p->_numclusters, p->_clustsize >> 10,
-        p->_memtotal >> 10, p->name);
+    if (netmap_verbose)
+        D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
+            p->_numclusters, p->_clustsize >> 10,
+            p->_memtotal >> 10, p->name);
 
     return 0;
@@ -721,7 +723,7 @@ netmap_memory_finalize(void)
     nm_mem.refcount++;
     if (nm_mem.refcount > 1) {
-        D("busy (refcount %d)", nm_mem.refcount);
+        ND("busy (refcount %d)", nm_mem.refcount);
         goto out;
     }
@@ -796,6 +798,8 @@ static void
 netmap_free_rings(struct netmap_adapter *na)
 {
     int i;
+    if (!na->tx_rings)
+        return;
     for (i = 0; i < na->num_tx_rings + 1; i++) {
         netmap_ring_free(na->tx_rings[i].ring);
         na->tx_rings[i].ring = NULL;
@@ -804,22 +808,32 @@ netmap_free_rings(struct netmap_adapter *na)
         netmap_ring_free(na->rx_rings[i].ring);
         na->rx_rings[i].ring = NULL;
     }
+    free(na->tx_rings, M_DEVBUF);
+    na->tx_rings = na->rx_rings = NULL;
 }
 
 /* call with NMA_LOCK held */
+/*
+ * Allocate the per-fd structure netmap_if.
+ * If this is the first instance, also allocate the krings, rings etc.
+ */
 static void *
 netmap_if_new(const char *ifname, struct netmap_adapter *na)
 {
     struct netmap_if *nifp;
     struct netmap_ring *ring;
     ssize_t base; /* handy for relative offsets between rings and nifp */
-    u_int i, len, ndesc;
-    u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
-    u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
+    u_int i, len, ndesc, ntx, nrx;
     struct netmap_kring *kring;
 
+    if (netmap_update_config(na)) {
+        /* configuration mismatch, report and fail */
+        return NULL;
+    }
+    ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
+    nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
+
     /*
      * the descriptor is followed inline by an array of offsets
      * to the tx and rx rings in the shared memory region.
@@ -840,6 +854,14 @@ netmap_if_new(const char *ifname, struct netmap_adapter *na)
         goto final;
     }
+    len = (ntx + nrx) * sizeof(struct netmap_kring);
+    na->tx_rings = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+    if (na->tx_rings == NULL) {
+        D("Cannot allocate krings for %s", ifname);
+        goto cleanup;
+    }
+    na->rx_rings = na->tx_rings + ntx;
+
     /*
      * First instance, allocate netmap rings and buffers for this card
      * The rings are contiguous, but have variable size.
@@ -947,5 +969,6 @@ static void
 netmap_memory_deref(void)
 {
     nm_mem.refcount--;
-    D("refcount = %d", nm_mem.refcount);
+    if (netmap_verbose)
+        D("refcount = %d", nm_mem.refcount);
 }