#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H
static struct xor_block_template xor_block_sse = {
|
|
|
|
.name = "generic_sse",
|
|
|
|
.do_2 = xor_sse_2,
|
|
|
|
.do_3 = xor_sse_3,
|
|
|
|
.do_4 = xor_sse_4,
|
|
|
|
.do_5 = xor_sse_5,
|
|
|
|
};
|
|
|
|
|
/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched. */
/*
 * Register the candidate XOR implementations for speed calibration.
 * AVX_XOR_SPEED (from <asm/xor_avx.h>) tries the AVX variants first,
 * then the prefetch-64 SSE template, then the plain SSE template.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)

#endif /* _ASM_X86_XOR_64_H */