Move vmmeter atomic counters into dedicated cache lines

Prior to the change they were subject to extreme false sharing.
In particular, this change shaves about 3 seconds of real time off a -j 80 buildkernel.
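
As context for the fix, here is a minimal sketch of the false-sharing pattern being eliminated (illustrative only, not code from this commit; the struct names and the 64-byte line size are assumptions). Counters updated by different CPUs that sit in one cache line force every atomic increment to bounce that line between cores; per-line alignment gives each counter a line of its own.

/*
 * Illustrative sketch: 'CLINE', 'hot_shared' and 'hot_padded' are made
 * up for this example; the kernel uses __aligned(CACHE_LINE_SIZE)
 * rather than C11 _Alignas.
 */
#include <stdatomic.h>

#define CLINE 64			/* assumed cache line size */

struct hot_shared {			/* before: false sharing */
	atomic_uint wire_count;
	atomic_uint active_count;	/* shares wire_count's line */
};

struct hot_padded {			/* after: one line per counter */
	_Alignas(CLINE) atomic_uint wire_count;
	_Alignas(CLINE) atomic_uint active_count;
};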

Reviewed by:	alc, markj
Differential Revision:	https://reviews.freebsd.org/D12281
Author:	Mateusz Guzik
Date:	2017-09-10 19:00:38 +00:00
Commit:	1c0b34417b (parent e1275c6805)

2 changed files with 12 additions and 5 deletions

sys/sys/vmmeter.h

@@ -60,6 +60,12 @@ struct vmtotal {
 #if defined(_KERNEL) || defined(_WANT_VMMETER)
 #include <sys/counter.h>
 
+#ifdef _KERNEL
+#define VMMETER_ALIGNED	__aligned(CACHE_LINE_SIZE)
+#else
+#define VMMETER_ALIGNED
+#endif
+
 /*
  * System wide statistics counters.
  * Locking:
@@ -126,14 +132,15 @@ struct vmmeter {
 	u_int v_free_target;	/* (c) pages desired free */
 	u_int v_free_min;	/* (c) pages desired free */
 	u_int v_free_count;	/* (f) pages free */
-	u_int v_wire_count;	/* (a) pages wired down */
-	u_int v_active_count;	/* (q) pages active */
 	u_int v_inactive_target; /* (c) pages desired inactive */
-	u_int v_inactive_count;	/* (q) pages inactive */
-	u_int v_laundry_count;	/* (q) pages eligible for laundering */
 	u_int v_pageout_free_min;   /* (c) min pages reserved for kernel */
 	u_int v_interrupt_free_min; /* (c) reserved pages for int code */
 	u_int v_free_severe;	/* (c) severe page depletion point */
+	u_int v_wire_count VMMETER_ALIGNED;	/* (a) pages wired down */
+	u_int v_active_count VMMETER_ALIGNED;	/* (a) pages active */
+	u_int v_inactive_count VMMETER_ALIGNED;	/* (a) pages inactive */
+	u_int v_laundry_count VMMETER_ALIGNED;	/* (a) pages eligible for
+						   laundering */
 };
 #endif /* _KERNEL || _WANT_VMMETER */
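
The layout effect of VMMETER_ALIGNED can be seen in a compilable sketch, assuming a 64-byte cache line and the GCC/Clang aligned attribute; the struct and field names below are stand-ins, not the kernel's. Each annotated member is pushed to the next cache-line boundary, so the hot counters no longer share lines with the read-mostly thresholds above them, or with each other.

#include <stddef.h>

#define CLINE 64
/* Stand-in for VMMETER_ALIGNED, assuming GCC/Clang attribute syntax. */
#define SKETCH_ALIGNED __attribute__((aligned(CLINE)))

struct meter_sketch {
	unsigned int free_target;		  /* read-mostly */
	unsigned int free_min;			  /* read-mostly */
	unsigned int wire_count SKETCH_ALIGNED;	  /* hot: starts a new line */
	unsigned int active_count SKETCH_ALIGNED; /* hot: starts another */
};

/* The padding the attribute inserts is visible in the offsets. */
_Static_assert(offsetof(struct meter_sketch, wire_count) == CLINE,
    "wire_count owns the second cache line");
_Static_assert(offsetof(struct meter_sketch, active_count) == 2 * CLINE,
    "active_count owns the third cache line");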

sys/vm/vm_meter.c

@@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_object.h>
 #include <sys/sysctl.h>
 
-struct vmmeter vm_cnt = {
+struct vmmeter __exclusive_cache_line vm_cnt = {
 	.v_swtch = EARLY_COUNTER,
 	.v_trap = EARLY_COUNTER,
 	.v_syscall = EARLY_COUNTER,
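
Annotating the fields keeps them apart inside the struct, but vm_cnt itself could still start mid-line next to an unrelated global; __exclusive_cache_line pins the placement of the object as a whole (its real definition lives in sys/cdefs.h). Below is a portable approximation of the guarantee it needs to provide, namely cache-line alignment plus a size rounded up to whole lines; the union wrapper and all names here are assumptions for this sketch, not the kernel's mechanism.

/*
 * Illustrative approximation, not the kernel macro: pad the object to
 * whole cache lines and align it, so no neighboring global can share
 * any line with it.  'CLINE' and 'stats_exclusive' are made up here.
 */
#define CLINE 64

struct stats {
	unsigned long swtch;
	unsigned long trap;
	unsigned long syscalls;
};

union stats_exclusive {
	struct stats s;
	char pad[((sizeof(struct stats) + CLINE - 1) / CLINE) * CLINE];
} __attribute__((aligned(CLINE)));

static union stats_exclusive vm_stats;	/* access as vm_stats.s.swtch etc. */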