Rework the known mutexes so that each benefits from staying on its own cache line, avoiding manual padding frobbery by using struct mtx_padalign. The sole exceptions are the nvme and sfxge drivers, whose authors redefined CACHE_LINE_SIZE manually; those need to be analyzed and dealt with separately.

Reviewed by:	jimharris, alc
commit 4ceaf45de5
parent 84e7a2ebb7

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=242402
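For readers unfamiliar with the pattern being centralized here: struct mtx_padalign is a mutex whose size and alignment are rounded up to a full cache line, so the lock never shares a line with adjacent hot fields and each consumer no longer needs a hand-written pad[] array. A minimal userland sketch of the idea (the type name padded_mutex and the constant 64 are illustrative assumptions, not the kernel's definitions):

#include <pthread.h>
#include <stdalign.h>
#include <stdio.h>

#define	CACHE_LINE_SIZE	64	/* assumed; the kernel takes this from machine/param.h */

/*
 * Illustrative analogue of struct mtx_padalign: the alignment of the
 * member forces both the alignment and the sizeof() of the wrapper up
 * to a full cache line, replacing fragile manual padding such as
 * "char pad[64 - sizeof(struct mtx)]".
 */
struct padded_mutex {
	alignas(CACHE_LINE_SIZE) pthread_mutex_t mtx;
};

int
main(void)
{
	/* sizeof() is rounded up to the alignment, i.e. one full line. */
	printf("sizeof(struct padded_mutex) = %zu\n",
	    sizeof(struct padded_mutex));
	return (0);
}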
sys/kern/kern_timeout.c:

@@ -119,8 +119,8 @@ struct cc_mig_ent {
  * when the callout should be served.
  */
 struct callout_cpu {
-	struct mtx		cc_lock;
-	struct cc_mig_ent	cc_migrating_entity __aligned(CACHE_LINE_SIZE);
+	struct mtx_padalign	cc_lock;
+	struct cc_mig_ent	cc_migrating_entity;
 	struct callout		*cc_callout;
 	struct callout_tailq	*cc_callwheel;
 	struct callout_list	cc_callfree;
sys/kern/sched_ule.c:

@@ -228,8 +228,7 @@ struct tdq {
  * tdq_lock is padded to avoid false sharing with tdq_load and
  * tdq_cpu_idle.
  */
-	struct mtx tdq_lock;		/* run queue lock. */
-	char pad[64 - sizeof(struct mtx)];
+	struct mtx_padalign tdq_lock;	/* run queue lock. */
 	struct cpu_group *tdq_cg;	/* Pointer to cpu topology. */
 	volatile int tdq_load;		/* Aggregate load. */
 	volatile int tdq_cpu_idle;	/* cpu_idle() is active. */
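The deleted sched_ule.c padding only worked while sizeof(struct mtx) stayed under the hand-coded 64; mtx_padalign derives the pad from CACHE_LINE_SIZE instead. The property the change preserves can be stated as a compile-time check; a userland sketch (struct tdq_sketch and its fields are stand-ins for the kernel types):

#include <pthread.h>
#include <stdalign.h>
#include <stddef.h>

#define	CACHE_LINE_SIZE	64	/* assumed */

struct padded_mutex {		/* as in the sketch above */
	alignas(CACHE_LINE_SIZE) pthread_mutex_t mtx;
};

/* Stand-in for struct tdq: the lock plus the fields it must not share with. */
struct tdq_sketch {
	struct padded_mutex tdq_lock;	/* gets its own cache line */
	volatile int tdq_load;		/* written by remote CPUs */
	volatile int tdq_cpu_idle;
};

/*
 * What the padding buys: the first field after the lock starts on a new
 * cache line, so remote stores to tdq_load cannot invalidate the line
 * that lock spinners are reading.
 */
_Static_assert(offsetof(struct tdq_sketch, tdq_load) % CACHE_LINE_SIZE == 0,
    "tdq_load must not share a cache line with tdq_lock");

int
main(void)
{
	return (0);
}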
sys/kern/sched_ule.c (continued):

@@ -292,7 +291,7 @@ static struct tdq tdq_cpu;
 #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
 #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
 #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
-#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)
+#define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
 
 static void sched_priority(struct thread *);
 static void sched_thread_priority(struct thread *, u_char);
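The cast in the new TDQ_LOCKPTR() is what keeps every existing mtx_lock_spin() call compiling unchanged: C guarantees that a pointer to a structure, suitably converted, points to its first member, so a struct mtx_padalign * may be treated as a struct mtx * for as long as the embedded mutex remains the first member. A standalone illustration using stand-in types:

#include <assert.h>
#include <pthread.h>
#include <stdalign.h>

struct padded_mutex {
	alignas(64) pthread_mutex_t mtx;	/* must stay the first member */
};

/* Mirrors TDQ_LOCKPTR(): strip the padded wrapper to reach the lock. */
#define	LOCKPTR(p)	((pthread_mutex_t *)(p))

int
main(void)
{
	struct padded_mutex pm = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	/*
	 * C11 6.7.2.1p15: a pointer to a structure, suitably converted,
	 * points to its initial member, so this cast is well-defined.
	 */
	assert((void *)LOCKPTR(&pm) == (void *)&pm.mtx);
	pthread_mutex_lock(LOCKPTR(&pm));
	pthread_mutex_unlock(LOCKPTR(&pm));
	return (0);
}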
sys/vm/vm_page.c:

@@ -116,10 +116,10 @@ __FBSDID("$FreeBSD$");
  */
 
 struct vpgqueues vm_page_queues[PQ_COUNT];
-struct vpglocks vm_page_queue_lock;
-struct vpglocks vm_page_queue_free_lock;
+struct mtx_padalign vm_page_queue_mtx;
+struct mtx_padalign vm_page_queue_free_mtx;
 
-struct vpglocks pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign pa_lock[PA_LOCK_COUNT];
 
 vm_page_t vm_page_array;
 long vm_page_array_size;
@@ -298,7 +298,7 @@ vm_page_startup(vm_offset_t vaddr)
 	    MTX_RECURSE);
 	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
-		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);
+		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the queue headers for the hold queue, the active queue,
sys/vm/vm_page.h:

@@ -187,13 +187,8 @@ struct vpgqueues {
 
 extern struct vpgqueues vm_page_queues[PQ_COUNT];
 
-struct vpglocks {
-	struct mtx	data;
-	char		pad[CACHE_LINE_SIZE - sizeof(struct mtx)];
-} __aligned(CACHE_LINE_SIZE);
-
-extern struct vpglocks vm_page_queue_free_lock;
-extern struct vpglocks pa_lock[];
+extern struct mtx_padalign vm_page_queue_free_mtx;
+extern struct mtx_padalign pa_lock[];
 
 #if defined(__arm__)
 #define	PDRSHIFT	PDR_SHIFT
@@ -202,7 +197,7 @@ extern struct vpglocks pa_lock[];
 #endif
 
 #define	pa_index(pa)	((pa) >> PDRSHIFT)
-#define	PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data
+#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
 #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
 #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
 #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
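PA_LOCKPTR() is lock striping: the physical address is reduced to a superpage-sized chunk by pa_index(), and that index is hashed into a small fixed array of padded mutexes, so one lock covers many nearby pages without needing a per-page mutex. A userland sketch of the same scheme (the PDRSHIFT and PA_LOCK_COUNT values are illustrative assumptions):

#include <pthread.h>
#include <stdint.h>

#define	PDRSHIFT	21	/* assumed: 2 MB superpage chunks */
#define	PA_LOCK_COUNT	32	/* assumed stripe count */

static pthread_mutex_t pa_lock[PA_LOCK_COUNT];

/* Mirrors pa_index()/PA_LOCKPTR(): chunk the address, then pick a stripe. */
#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	(&pa_lock[pa_index(pa) % PA_LOCK_COUNT])

int
main(void)
{
	uint64_t pa = 0x12345000;	/* some physical address */
	int i;

	for (i = 0; i < PA_LOCK_COUNT; i++)	/* cf. vm_page_startup() */
		pthread_mutex_init(&pa_lock[i], NULL);

	pthread_mutex_lock(PA_LOCKPTR(pa));
	/* ... operate on the page backing pa ... */
	pthread_mutex_unlock(PA_LOCKPTR(pa));
	return (0);
}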
sys/vm/vm_page.h (continued):

@@ -235,8 +230,6 @@ extern struct vpglocks pa_lock[];
 #define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
 #endif
 
-#define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data
-
 /*
  * The vm_page's aflags are updated using atomic operations. To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
@@ -327,9 +320,8 @@ vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 
 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 
-extern struct vpglocks vm_page_queue_lock;
+extern struct mtx_padalign vm_page_queue_mtx;
 
-#define	vm_page_queue_mtx	vm_page_queue_lock.data
 #define	vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
 #define	vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)
 