
Tidy up the locking of the bounce pages structures.

- Use SYSINIT to initialize the structures instead of checking
  total_bpages against 0 in alloc_bounce_pages(), which could let
  several initializations run at the same time (a sketch of the
  pattern follows this list).
- Add missing locking in bus_dmamap_load(): the bounce pages mutex
  must be held when calling reserve_bounce_pages() and when touching
  the bounce_map_waitinglist list.
- Remove the useless splhigh() and splx() calls.
- Assert that the bounce pages mutex is held in reserve_bounce_pages()
  to catch regressions.
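
For context, a minimal sketch of the one-shot SYSINIT pattern this
commit adopts, using the identifiers from the diff below (all other
busdma state elided):

static struct mtx bounce_lock;

/*
 * Runs exactly once, at SI_SUB_LOCK time during boot, before any
 * driver can reach alloc_bounce_pages().  The lazy
 * "if (total_bpages == 0)" check it replaces could let two callers
 * race through the initialization simultaneously.
 */
static void
init_bounce_pages(void *dummy __unused)
{
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);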
Maxime Henrion 2003-03-17 17:26:39 +00:00
parent fabc845e37
commit 379711437f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=112344


@@ -30,6 +30,7 @@
 #include <sys/bus.h>
 #include <sys/systm.h>
 #include <sys/interrupt.h>
+#include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
@@ -99,6 +100,7 @@ static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 static struct bus_dmamap nobounce_dmamap;
 
+static void init_bounce_pages(void *dummy);
 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
 static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
@@ -489,9 +491,7 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
         /* Reserve Necessary Bounce Pages */
         if (map->pagesneeded != 0) {
-                int s;
-
-                s = splhigh();
+                mtx_lock(&bounce_lock);
                 if (reserve_bounce_pages(dmat, map) != 0) {
                         /* Queue us for resources */
@@ -502,11 +502,10 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                         map->callback_arg = callback_arg;
                         STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
-                        splx(s);
+                        mtx_unlock(&bounce_lock);
                         return (EINPROGRESS);
                 }
-                splx(s);
+                mtx_unlock(&bounce_lock);
         }
         vaddr = (vm_offset_t)buf;
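
Condensed, the locking discipline these two hunks establish in
bus_dmamap_load() looks like this (a sketch; the map bookkeeping
between the lock and the list insert is abbreviated):

mtx_lock(&bounce_lock);
if (reserve_bounce_pages(dmat, map) != 0) {
        /*
         * Not enough free pages: record the callback, park the map
         * on the waiting list, and report EINPROGRESS.  Both the
         * failed reservation and the list insert happen under
         * bounce_lock, where the old code relied on splhigh().
         */
        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
        mtx_unlock(&bounce_lock);
        return (EINPROGRESS);
}
mtx_unlock(&bounce_lock);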
@@ -826,19 +825,27 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
         }
 }
 
+static void
+init_bounce_pages(void *dummy __unused)
+{
+        free_bpages = 0;
+        reserved_bpages = 0;
+        active_bpages = 0;
+        total_bpages = 0;
+        STAILQ_INIT(&bounce_page_list);
+        STAILQ_INIT(&bounce_map_waitinglist);
+        STAILQ_INIT(&bounce_map_callbacklist);
+        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
+
 static int
 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
 {
         int count;
 
         count = 0;
-        if (total_bpages == 0) {
-                mtx_init(&bounce_lock, "BouncePage", NULL, MTX_DEF);
-                STAILQ_INIT(&bounce_page_list);
-                STAILQ_INIT(&bounce_map_waitinglist);
-                STAILQ_INIT(&bounce_map_callbacklist);
-        }
         while (numpages > 0) {
                 struct bounce_page *bpage;
@@ -875,6 +882,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
         int pages;
 
+        mtx_assert(&bounce_lock, MA_OWNED);
         pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
         free_bpages -= pages;
         reserved_bpages += pages;
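
The mtx_assert() added above is the regression guard named in the
commit message: on a kernel built with INVARIANTS, any future caller
that enters reserve_bounce_pages() without holding bounce_lock panics
immediately instead of racing on the page counters.  A hypothetical
buggy caller (sketch only):

/* BUG: bounce_lock not held -- the MA_OWNED assertion fires here. */
if (reserve_bounce_pages(dmat, map) != 0)
        return (EINPROGRESS);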