
Improve LinuxKPI scatter list support.

The i915kms driver in Linux 4.9 reimplements parts of the scatter list
functions for performance reasons. That leaves little room for changing
structure layouts and functionality if i915kms is to be built as-is. This
patch aligns the LinuxKPI scatter list support with what the i915kms
driver expects. While at it, remove some comments that are no longer
needed.

Obtained from:		kmacy @
MFC after:		1 week
Sponsored by:		Mellanox Technologies
Author:	Hans Petter Selasky
Date:	2017-02-22 19:31:02 +00:00
Commit:	8306998f5b (parent: e3c4285520)
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=314105
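The central change below is that a scatterlist entry no longer keeps its page
pointer in a union; it packs the pointer and two flag bits into a single
page_link word, which is the layout upstream Linux (and therefore the i915kms
driver) assumes. A minimal sketch of that encoding; the names are purely
illustrative and not part of the patch:

/* Illustrative sketch of the page_link encoding, not part of the patch. */
#define	EXAMPLE_SG_CHAIN	0x01UL	/* entry points at the next chunk */
#define	EXAMPLE_SG_LAST		0x02UL	/* entry terminates the list */

static inline void
example_set_last_page(struct scatterlist *sg, struct page *page)
{
	/* Page pointer in the upper bits, flags in the two low bits. */
	sg->page_link = (unsigned long)page | EXAMPLE_SG_LAST;
}

static inline struct page *
example_get_page(struct scatterlist *sg)
{
	/* Mask off the flag bits to recover the pointer. */
	return ((struct page *)(sg->page_link & ~0x3UL));
}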


@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
* Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
* Copyright (c) 2015 Matthew Dillon <dillon@backplane.com>
* All rights reserved.
*
@@ -34,18 +34,17 @@
#include <linux/page.h>
#include <linux/slab.h>
#include <linux/mm.h>
struct scatterlist {
union {
struct page *page;
struct scatterlist *sg;
} sl_un;
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t address;
unsigned long offset;
uint32_t length;
uint32_t flags;
};
CTASSERT((sizeof(struct scatterlist) & 0x3) == 0);
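/*
 * The assertion above keeps sizeof(struct scatterlist) a multiple of four,
 * so a pointer to an entry always has its two low bits clear and those bits
 * can carry the chain/end flags tested by sg_is_chain() and sg_is_last()
 * below.
 */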
struct sg_table {
struct scatterlist *sgl;
unsigned int nents;
@@ -56,27 +55,55 @@ struct sg_page_iter {
struct scatterlist *sg;
unsigned int sg_pgoffset;
unsigned int maxents;
struct {
unsigned int nents;
int pg_advance;
} internal;
};
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
#define SG_MAGIC 0x87654321UL
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
#define sg_dma_address(sg) (sg)->address
#define sg_dma_len(sg) (sg)->length
#define sg_page(sg) (sg)->sl_un.page
#define sg_scatternext(sg) (sg)->sl_un.sg
#define SG_END 0x01
#define SG_CHAIN 0x02
#define for_each_sg_page(sgl, iter, nents, pgoffset) \
for (_sg_iter_init(sgl, iter, nents, pgoffset); \
(iter)->sg; _sg_iter_next(iter))
#define for_each_sg(sglist, sg, sgmax, iter) \
for (iter = 0, sg = (sglist); iter < (sgmax); iter++, sg = sg_next(sg))
typedef struct scatterlist *(sg_alloc_fn) (unsigned int, gfp_t);
typedef void (sg_free_fn) (struct scatterlist *, unsigned int);
static inline void
sg_assign_page(struct scatterlist *sg, struct page *page)
{
unsigned long page_link = sg->page_link & 0x3;
sg->page_link = page_link | (unsigned long)page;
}
static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
unsigned int offset)
{
sg_page(sg) = page;
sg_dma_len(sg) = len;
sg_assign_page(sg, page);
sg->offset = offset;
if (offset > PAGE_SIZE)
panic("sg_set_page: Invalid offset %d\n", offset);
sg->length = len;
}
static inline struct page *
sg_page(struct scatterlist *sg)
{
return ((struct page *)((sg)->page_link & ~0x3));
}
static inline void
@@ -86,28 +113,21 @@ sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen)
((uintptr_t)buf) & (PAGE_SIZE - 1));
}
static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
bzero(sg, sizeof(*sg) * nents);
sg[nents - 1].flags = SG_END;
}
static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
if (sg->flags & SG_END)
if (sg_is_last(sg))
return (NULL);
sg++;
if (sg->flags & SG_CHAIN)
sg = sg_scatternext(sg);
if (sg_is_chain(sg))
sg = sg_chain_ptr(sg);
return (sg);
}
static inline vm_paddr_t
sg_phys(struct scatterlist *sg)
{
return sg_page(sg)->phys_addr + sg->offset;
return (VM_PAGE_TO_PHYS(sg_page(sg)) + sg->offset);
}
static inline void
@@ -118,18 +138,44 @@ sg_chain(struct scatterlist *prv, unsigned int prv_nents,
sg->offset = 0;
sg->length = 0;
sg->flags = SG_CHAIN;
sg->sl_un.sg = sgl;
}
static inline void
sg_mark_end(struct scatterlist *sg)
{
sg->flags = SG_END;
sg->page_link = ((unsigned long)sgl | 0x01) & ~0x02;
}
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents)
sg_mark_end(struct scatterlist *sg)
{
sg->page_link |= 0x02;
sg->page_link &= ~0x01;
}
static inline void
sg_init_table(struct scatterlist *sg, unsigned int nents)
{
bzero(sg, sizeof(*sg) * nents);
sg_mark_end(&sg[nents - 1]);
}
static struct scatterlist *
sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
if (nents == SG_MAX_SINGLE_ALLOC) {
return ((void *)__get_free_page(gfp_mask));
} else
return (kmalloc(nents * sizeof(struct scatterlist), gfp_mask));
}
static inline void
sg_kfree(struct scatterlist *sg, unsigned int nents)
{
if (nents == SG_MAX_SINGLE_ALLOC) {
free_page((unsigned long)sg);
} else
kfree(sg);
}
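/*
 * Chunks of exactly SG_MAX_SINGLE_ALLOC entries fit in a single page, so
 * they are taken from and returned to the page allocator directly; smaller
 * trailing chunks go through kmalloc()/kfree().
 */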
static inline void
__sg_free_table(struct sg_table *table, unsigned int max_ents,
bool skip_first_chunk, sg_free_fn * free_fn)
{
struct scatterlist *sgl, *next;
@@ -142,7 +188,7 @@ __sg_free_table(struct sg_table *table, unsigned int max_ents)
unsigned int sg_size;
if (alloc_size > max_ents) {
next = sgl[max_ents - 1].sl_un.sg;
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
@@ -151,7 +197,10 @@ __sg_free_table(struct sg_table *table, unsigned int max_ents)
}
table->orig_nents -= sg_size;
kfree(sgl);
if (skip_first_chunk)
skip_first_chunk = 0;
else
free_fn(sgl, alloc_size);
sgl = next;
}
@@ -161,12 +210,13 @@ __sg_free_table(struct sg_table *table, unsigned int max_ents)
static inline void
sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
static inline int
__sg_alloc_table(struct sg_table *table, unsigned int nents,
unsigned int max_ents, gfp_t gfp_mask)
unsigned int max_ents, struct scatterlist *first_chunk,
gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
struct scatterlist *sg, *prv;
unsigned int left;
@@ -174,7 +224,7 @@ __sg_alloc_table(struct sg_table *table, unsigned int nents,
memset(table, 0, sizeof(*table));
if (nents == 0)
return -EINVAL;
return (-EINVAL);
left = nents;
prv = NULL;
do {
@@ -189,12 +239,17 @@ __sg_alloc_table(struct sg_table *table, unsigned int nents,
left -= sg_size;
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
if (first_chunk) {
sg = first_chunk;
first_chunk = NULL;
} else {
sg = alloc_fn(alloc_size, gfp_mask);
}
if (unlikely(!sg)) {
if (prv)
table->nents = ++table->orig_nents;
return -ENOMEM;
return (-ENOMEM);
}
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
@@ -210,7 +265,7 @@ __sg_alloc_table(struct sg_table *table, unsigned int nents,
prv = sg;
} while (left);
return 0;
return (0);
}
static inline int
@@ -219,11 +274,70 @@ sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
int ret;
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
gfp_mask);
NULL, gfp_mask, sg_kmalloc);
if (unlikely(ret))
__sg_free_table(table, SG_MAX_SINGLE_ALLOC);
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
return ret;
return (ret);
}
static inline int
sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int count,
unsigned long off, unsigned long size,
gfp_t gfp_mask)
{
unsigned int i, segs, cur;
int rc;
struct scatterlist *s;
for (segs = i = 1; i < count; ++i) {
if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
++segs;
}
if (__predict_false((rc = sg_alloc_table(sgt, segs, gfp_mask))))
return (rc);
cur = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
unsigned long seg_size;
unsigned int j;
for (j = cur + 1; j < count; ++j)
if (page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
seg_size = ((j - cur) << PAGE_SHIFT) - off;
sg_set_page(s, pages[cur], min(size, seg_size), off);
size -= seg_size;
off = 0;
cur = j;
}
return (0);
}
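/*
 * sg_alloc_table_from_pages() first counts the runs of physically
 * contiguous pages in the array and then emits one scatterlist entry per
 * run, so a fully contiguous buffer collapses into a single entry no
 * matter how many pages it spans.
 */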
static inline int
sg_nents(struct scatterlist *sg)
{
int nents;
for (nents = 0; sg; sg = sg_next(sg))
nents++;
return (nents);
}
static inline void
__sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
{
piter->internal.pg_advance = 0;
piter->internal.nents = nents;
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
}
static inline void
@@ -247,6 +361,34 @@ _sg_iter_next(struct sg_page_iter *iter)
iter->sg = sg;
}
static inline int
sg_page_count(struct scatterlist *sg)
{
return (PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT);
}
static inline bool
__sg_page_iter_next(struct sg_page_iter *piter)
{
if (piter->internal.nents == 0)
return (0);
if (piter->sg == NULL)
return (0);
piter->sg_pgoffset += piter->internal.pg_advance;
piter->internal.pg_advance = 1;
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
piter->sg_pgoffset -= sg_page_count(piter->sg);
piter->sg = sg_next(piter->sg);
if (--piter->internal.nents == 0)
return (0);
if (piter->sg == NULL)
return (0);
}
return (1);
}
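/*
 * __sg_page_iter_next() advances one page at a time: pg_advance is zero on
 * the first call so iteration starts on the current page, and the loop
 * moves to the next scatterlist entry once the page offset runs past the
 * pages covered by the current one.
 */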
static inline void
_sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
unsigned int nents, unsigned long pgoffset)
@@ -266,14 +408,14 @@ _sg_iter_init(struct scatterlist *sgl, struct sg_page_iter *iter,
static inline dma_addr_t
sg_page_iter_dma_address(struct sg_page_iter *spi)
{
return spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT);
return (spi->sg->address + (spi->sg_pgoffset << PAGE_SHIFT));
}
#define for_each_sg_page(sgl, iter, nents, pgoffset) \
for (_sg_iter_init(sgl, iter, nents, pgoffset); \
(iter)->sg; _sg_iter_next(iter))
static inline struct page *
sg_page_iter_page(struct sg_page_iter *piter)
{
return (nth_page(sg_page(piter->sg), piter->sg_pgoffset));
}
#define for_each_sg(sglist, sg, sgmax, _itr) \
for (_itr = 0, sg = (sglist); _itr < (sgmax); _itr++, sg = sg_next(sg))
#endif /* _LINUX_SCATTERLIST_H_ */
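
For context, a typical consumer would build a table from an existing page
array and walk it page by page. A hedged usage sketch; the pages and npages
parameters, the GFP_KERNEL choice and the error handling are assumptions for
illustration, not taken from the patch:

/* Usage sketch only; "pages" and "npages" are assumed to come from the caller. */
static int
example_build_and_walk(struct page **pages, unsigned int npages)
{
	struct sg_table table;
	struct sg_page_iter iter;
	int error;

	/* One scatterlist entry per run of physically contiguous pages. */
	error = sg_alloc_table_from_pages(&table, pages, npages, 0,
	    (unsigned long)npages << PAGE_SHIFT, GFP_KERNEL);
	if (error != 0)
		return (error);

	/* Visit every page covered by the table. */
	for_each_sg_page(table.sgl, &iter, table.nents, 0) {
		struct page *page = sg_page_iter_page(&iter);

		/* ... hand "page" to the consumer ... */
		(void)page;
	}

	sg_free_table(&table);
	return (0);
}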