x86: unify prefetch operations

This patch moves the prefetch[w]? functions to processor.h

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

commit ae2e15eb3b (parent 1a53905add)
Author: Glauber de Oliveira Costa, 2008-01-30 13:31:40 +01:00 (committed by Ingo Molnar)
3 changed files with 30 additions and 33 deletions

@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
#define BASE_PREFETCH		ASM_NOP4
#define ARCH_HAS_PREFETCH
#else
#define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth to care about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre XP Athlons currently
   That should be fixed. */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache
   coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
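
For context on how these helpers get used, the caller-side pattern is to hint the next element into the cache while working on the current one. The sketch below is a minimal userspace analogue, not kernel code: it stands in GCC's __builtin_prefetch for the alternative_input()-based helpers above, and struct node / sum_list() are hypothetical names used purely for illustration.

#include <stdio.h>

/* Hypothetical list node, for illustration only. */
struct node {
	struct node *next;
	long payload;
};

static long sum_list(const struct node *head)
{
	const struct node *n;
	long sum = 0;

	for (n = head; n; n = n->next) {
		/*
		 * Hint the next node into the cache while summing the
		 * current one. In-kernel this would be prefetch(n->next);
		 * GCC's builtin emits the same family of x86 prefetch
		 * instructions (0 = read access, 1 = low temporal locality).
		 */
		if (n->next)
			__builtin_prefetch(n->next, 0, 1);
		sum += n->payload;
	}
	return sum;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

	printf("%ld\n", sum_list(&a));	/* prints 6 */
	return 0;
}

A prefetch hint never changes program results; at worst it costs a wasted cycle, which is exactly why the 32-bit fallback above can be plain NOPs.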

@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth to care about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre XP Athlons currently
   That should be fixed. */
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache
   coherency protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#endif /* __ASM_I386_PROCESSOR_H */

@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
#define ASM_NOP_MAX 8

static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#endif /* __ASM_X86_64_PROCESSOR_H */
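
All three variants build on alternative_input(), which replaces the default instruction with the feature-gated one by patching the instruction bytes once at boot. As a rough userspace analogue of that "default instruction unless the CPU feature bit is set" choice, assuming nothing beyond GCC's __builtin_cpu_supports and inline asm (prefetch_nta() is an illustrative name, not a kernel API), one can branch on the feature bit at runtime instead:

#include <stdio.h>

/*
 * Userspace sketch of the idea behind alternative_input(): pick a
 * conservative default instruction unless a CPU feature bit allows
 * a better one. The kernel patches this in once at boot; here we
 * simply test the feature on every call.
 */
static void prefetch_nta(const void *x)
{
	if (__builtin_cpu_supports("sse"))	/* kernel: X86_FEATURE_XMM */
		asm volatile("prefetchnta (%0)" : : "r" (x));
	else
		asm volatile("prefetcht0 (%0)" : : "r" (x));
}

int main(void)
{
	long v = 42;

	prefetch_nta(&v);	/* pure hint; v is unaffected */
	printf("%ld\n", v);
	return 0;
}

Note that the kernel's 32-bit default is NOPs rather than prefetcht0, since prefetcht0 itself requires SSE; the sketch assumes a 64-bit CPU, where prefetcht0 is always available.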