1 star, 0 forks — mirror of https://git.FreeBSD.org/src.git, synced 2024-12-12 09:58:36 +00:00

Add domain policy allocation for amd64 fpu_kern_ctx

Like other types of allocation, fpu_kern_ctx are frequently allocated per-cpu.
Provide the API and sketch some example consumers.

fpu_kern_alloc_ctx_domain() preferentially allocates memory from the
provided domain, and falls back to other domains if that one is empty
(DOMAINSET_PREF(domain) policy).

Maybe it makes more sense to just shove one of these in the DPCPU area
sooner or later -- left for future work.

Reviewed by:	markj
Differential Revision:	https://reviews.freebsd.org/D22053
This commit: c74a3041f0 by Conrad Meyer, 2020-07-03 14:54:46 +00:00 (parent 58199a7052)
Notes: svn2git 2020-12-20 02:59:44 +00:00 — svn path=/head/; revision=362913
4 changed files with 36 additions and 10 deletions

View File

@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@ -1030,17 +1031,31 @@ struct fpu_kern_ctx {
char hwstate1[];
};
/*
 * Number of bytes to allocate for a struct fpu_kern_ctx whose save
 * area must hold up to max_est bytes of extended state.  Slack of
 * XSAVE_AREA_ALIGN is included so the save area can be aligned up.
 */
static inline size_t __pure2
fpu_kern_alloc_sz(u_int max_est)
{
	size_t fixed;

	fixed = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN;
	return (fixed + max_est);
}
/*
 * Translate FPU_KERN_* flags into malloc(9) flags: callers that must
 * not sleep get M_NOWAIT, all others M_WAITOK; the context memory is
 * always zeroed.
 */
static inline int __pure2
fpu_kern_malloc_flags(u_int fpflags)
{
	int mflags;

	if (fpflags & FPU_KERN_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	return (mflags | M_ZERO);
}
/*
 * Allocate an fpu_kern_ctx, preferring memory from the given NUMA
 * domain; DOMAINSET_PREF falls back to other domains if that one is
 * empty.
 */
struct fpu_kern_ctx *
fpu_kern_alloc_ctx_domain(int domain, u_int flags)
{
	size_t sz;
	int mflags;

	sz = fpu_kern_alloc_sz(cpu_max_ext_state_size);
	mflags = fpu_kern_malloc_flags(flags);
	return (malloc_domainset(sz, M_FPUKERN_CTX, DOMAINSET_PREF(domain),
	    mflags));
}
/*
 * Allocate an fpu_kern_ctx from any NUMA domain, sized for the
 * largest extended state the CPU can save.  Size and malloc flags are
 * computed by the helpers shared with fpu_kern_alloc_ctx_domain().
 *
 * NOTE(review): the merged diff had left the pre-commit body (local
 * sz/res computation and a first return) ahead of the new call,
 * making the second return unreachable; the dead duplicate has been
 * removed.
 */
struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{

	return (malloc(fpu_kern_alloc_sz(cpu_max_ext_state_size),
	    M_FPUKERN_CTX, fpu_kern_malloc_flags(flags)));
}
void

View File

@ -71,6 +71,7 @@ int fputrap_sse(void);
int fputrap_x87(void);
void fpuuserinited(struct thread *td);
struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int flags);
struct fpu_kern_ctx *fpu_kern_alloc_ctx_domain(int domain, u_int flags);
void fpu_kern_free_ctx(struct fpu_kern_ctx *ctx);
void fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx,
u_int flags);

View File

@ -180,7 +180,12 @@ aesni_attach(device_t dev)
M_WAITOK|M_ZERO);
CPU_FOREACH(i) {
ctx_fpu[i] = fpu_kern_alloc_ctx(0);
#ifdef __amd64__
ctx_fpu[i] = fpu_kern_alloc_ctx_domain(
pcpu_find(i)->pc_domain, FPU_KERN_NORMAL);
#else
ctx_fpu[i] = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
#endif
mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
}

View File

@ -142,7 +142,12 @@ blake2_attach(device_t dev)
M_WAITOK | M_ZERO);
CPU_FOREACH(i) {
ctx_fpu[i] = fpu_kern_alloc_ctx(0);
#ifdef __amd64__
ctx_fpu[i] = fpu_kern_alloc_ctx_domain(
pcpu_find(i)->pc_domain, FPU_KERN_NORMAL);
#else
ctx_fpu[i] = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
#endif
mtx_init(&ctx_mtx[i], "bl2fpumtx", NULL, MTX_DEF | MTX_NEW);
}