
Use SFENCE for ordering CLFLUSHOPT.

The SDM states that CLFLUSHOPT instructions can be ordered with other
writes by SFENCE; the heavier MFENCE is not required.

Reviewed by:	alc
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Konstantin Belousov 2017-01-20 19:08:44 +00:00
parent dd1badb4a3
commit 5611aaa195
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=312555
4 changed files with 33 additions and 11 deletions
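
For context, the sketch below is a minimal userspace illustration (not FreeBSD code) of the pattern this change adopts: bracket the CLFLUSHOPT loop with SFENCE instead of the heavier MFENCE. The name flush_range and the hard-coded 64-byte line size are assumptions made up for the example; the kernel instead uses cpu_clflush_line_size and the sfence()/clflushopt() inlines from cpufunc.h shown in the diffs.

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64	/* assumption: real code reads the line size via CPUID */

static inline void
sfence(void)
{
	__asm__ __volatile__("sfence" : : : "memory");
}

static inline void
clflushopt(const void *addr)
{
	/* 0x66 prefix turns CLFLUSH into CLFLUSHOPT; needs a CPU with CLFLUSHOPT */
	__asm__ __volatile__(".byte 0x66; clflush %0" : : "m" (*(const char *)addr));
}

static void
flush_range(const void *buf, size_t len)
{
	const char *p, *end;

	/* Align down to a cache-line boundary, then flush line by line. */
	p = (const char *)((uintptr_t)buf & ~(uintptr_t)(CACHE_LINE_SIZE - 1));
	end = (const char *)buf + len;

	sfence();			/* order earlier stores before the flushes */
	for (; p < end; p += CACHE_LINE_SIZE)
		clflushopt(p);
	sfence();			/* order the flushes before later stores */
}

int
main(void)
{
	static char buf[4096];

	buf[0] = 1;			/* dirty a line */
	flush_range(buf, sizeof(buf));
	return (0);
}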

sys/amd64/amd64/pmap.c

@@ -1862,16 +1862,16 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflushopt(sva);
- mfence();
+ sfence();
} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
if (pmap_kextract(sva) == lapic_paddr)
@@ -1915,7 +1915,9 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
((cpu_feature & CPUID_CLFSH) == 0 && !useclflushopt))
pmap_invalidate_cache();
else {
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
for (i = 0; i < count; i++) {
daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
@@ -1927,7 +1929,9 @@ pmap_invalidate_cache_pages(vm_page_t *pages, int count)
clflush(daddr);
}
}
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
}
}

sys/amd64/include/cpufunc.h

@@ -326,6 +326,13 @@ mfence(void)
__asm __volatile("mfence" : : : "memory");
}
+ static __inline void
+ sfence(void)
+ {
+ __asm __volatile("sfence" : : : "memory");
+ }
static __inline void
ia32_pause(void)
{

sys/i386/i386/pmap.c

@@ -1283,16 +1283,16 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
return;
#endif
/*
- * Otherwise, do per-cache line flush. Use the mfence
+ * Otherwise, do per-cache line flush. Use the sfence
* instruction to insure that previous stores are
* included in the write-back. The processor
* propagates flush to other processors in the cache
* coherence domain.
*/
- mfence();
+ sfence();
for (; sva < eva; sva += cpu_clflush_line_size)
clflushopt(sva);
- mfence();
+ sfence();
} else if ((cpu_feature & CPUID_CLFSH) != 0 &&
eva - sva < PMAP_CLFLUSH_THRESHOLD) {
#ifdef DEV_APIC
@@ -5300,12 +5300,14 @@ pmap_flush_page(vm_page_t m)
eva = sva + PAGE_SIZE;
/*
- * Use mfence despite the ordering implied by
+ * Use mfence or sfence despite the ordering implied by
* mtx_{un,}lock() because clflush on non-Intel CPUs
* and clflushopt are not guaranteed to be ordered by
* any other instruction.
*/
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
for (; sva < eva; sva += cpu_clflush_line_size) {
if (useclflushopt)
@@ -5313,7 +5315,9 @@ pmap_flush_page(vm_page_t m)
else
clflush(sva);
}
- if (useclflushopt || cpu_vendor_id != CPU_VENDOR_INTEL)
+ if (useclflushopt)
+ sfence();
+ else if (cpu_vendor_id != CPU_VENDOR_INTEL)
mfence();
*cmap_pte2 = 0;
sched_unpin();

sys/i386/include/cpufunc.h

@@ -158,6 +158,13 @@ mfence(void)
__asm __volatile("mfence" : : : "memory");
}
+ static __inline void
+ sfence(void)
+ {
+ __asm __volatile("sfence" : : : "memory");
+ }
#ifdef _KERNEL
#define HAVE_INLINE_FFS