1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-15 10:17:20 +00:00

Add in required instruction serialization prior to releasing the

MP lock for the last time.  The use of a locked instruction to
    cpu-private memory is 3x faster than CPUID and 3x faster than the
    use of a locked instruction to shared memory (the lock itself).

    Instruction serialization is required to ensure that any pending
    memory ops are properly flushed prior to the release of the lock,
    due to out-of-order instruction execution by the cpu.
This commit is contained in:
Matthew Dillon 1999-11-23 18:46:09 +00:00
parent 84eadd6305
commit 87c8f0c0f2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=53639

View File

@ -76,6 +76,10 @@
* ----------------------------------
* Destroys %eax, %ecx. %edx must hold lock argument.
* Note: TPR_TARGET (relative to the stack) is destroyed in GRAB_HWI
*
* NOTE: Serialization is not required if we already hold the lock;
* in that case we also do not need a locked instruction to bump
* the recursion count.
*/
NON_GPROF_ENTRY(MPgetlock_edx)
@ -86,7 +90,7 @@ NON_GPROF_ENTRY(MPgetlock_edx)
cmpl _cpu_lockid, %ecx /* Do we already own the lock? */
jne 2f
incl %eax /* yes, just bump the count */
movl %eax, (%edx)
movl %eax, (%edx) /* serialization not required */
ret
2:
movl $FREE_LOCK, %eax /* lock must be free */
@ -157,6 +161,19 @@ NON_GPROF_ENTRY(MPtrylock)
* void MPrellock_edx(unsigned int *lock : %edx)
* ----------------------------------
* Destroys %ecx, argument must be in %edx
*
* SERIALIZATION NOTE!
*
* The pentium may execute instructions out of order. On a UP system
* this isn't a problem but on an MP system the pentium can get it
* wrong.
*
* We must force instruction serialization prior to releasing the MP lock for
* the last time. 'cpuid' or a locked bus cycle will accomplish this. A
* locked bus cycle is the fastest solution. We use our per-cpu private
* memory area rather than the shared lock memory because we are more likely
* to already have exclusive access to the cache line (which is 3x faster
* than if we have to invalidate another cpu's cache).
*/
NON_GPROF_ENTRY(MPrellock_edx)
@ -166,6 +183,8 @@ NON_GPROF_ENTRY(MPrellock_edx)
jnz 2f
ARB_HWI /* last release, arbitrate hw INTs */
movl $FREE_LOCK, %ecx /* - In which case we release it */
lock
addl $0,%fs:0 /* see note above */
2:
movl %ecx, (%edx)
ret