mirror of
https://git.FreeBSD.org/src.git
synced 2024-10-19 02:29:40 +00:00
libpthread: allocate rwlocks and spinlocks in dedicated cachelines
Reduces severe performance degradation due to false-sharing. Note that this does not account for hardware which can perform adjacent cacheline prefetch. [mjg: massaged the commit message and the patch to use aligned_alloc instead of malloc] PR: 272238 MFC after: 1 week
This commit is contained in:
parent
4a402dfe0b
commit
a6c0d801ca
@@ -60,7 +60,8 @@ _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
 	if (lock == NULL)
 		return (EINVAL);
 	if (pshared == PTHREAD_PROCESS_PRIVATE) {
-		lck = malloc(sizeof(struct pthread_spinlock));
+		lck = aligned_alloc(CACHE_LINE_SIZE,
+		    roundup(sizeof(struct pthread_spinlock), CACHE_LINE_SIZE));
 		if (lck == NULL)
 			return (ENOMEM);
 		*lock = lck;
@@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
 
 #include <errno.h>
 #include <limits.h>
+#include <stdlib.h>
 #include <string.h>
 
 #include "namespace.h"
 #include <pthread.h>
@@ -102,9 +103,11 @@ rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
 
 	if (attr == NULL || *attr == NULL ||
 	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
-		prwlock = calloc(1, sizeof(struct pthread_rwlock));
+		prwlock = aligned_alloc(CACHE_LINE_SIZE,
+		    roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
 		if (prwlock == NULL)
 			return (ENOMEM);
+		memset(prwlock, 0, sizeof(struct pthread_rwlock));
 		*rwlock = prwlock;
 	} else {
 		prwlock = __thr_pshared_offpage(rwlock, 1);
Loading…
Reference in New Issue
Block a user