/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

.weak memset
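/*
 * The weak binding presumably exists so that an instrumented definition
 * (e.g. KASAN's memset) can override this one at link time.
 */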

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * fast string operations to get better performance than the original
 * function. The code is simpler and shorter than the original function
 * as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
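/* C prototype (System V AMD64 ABI): void *memset(void *s, int c, size_t n); returns s. */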
ENTRY(memset)
ENTRY(__memset)
	/*
	 * Some CPUs support the enhanced REP MOVSB/STOSB feature. It is
	 * recommended to use it when available, so such CPUs jump to
	 * memset_erms. If it is not available but fast string operations
	 * are (REP_GOOD), fall through to the code below. Otherwise, jump
	 * to the original memset_orig.
	 */
	ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memset_erms", X86_FEATURE_ERMS

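	/*
	 * Fast string path (REP_GOOD): store count/8 qwords of the replicated
	 * byte with rep stosq, then the remaining count%8 bytes with rep stosb.
	 */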
	movq %rdi,%r9
	movq %rdx,%rcx
	andl $7,%edx
	shrq $3,%rcx
	/* expand byte value */
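	/* e.g. 0xab * 0x0101010101010101 = 0xabababababababab (byte in all 8 positions) */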
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	imulq %rsi,%rax
	rep stosq
	movl %edx,%ecx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * enhanced REP STOSB (ERMS) and replaces the fast string variant above
 * when the CPU supports it. The code is simpler and shorter than the
 * fast string function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
ENTRY(memset_erms)
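	/* With ERMS, a single rep stosb handles the whole range efficiently; no qword/tail split is needed. */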
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset_erms)

ENTRY(memset_orig)
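	/* Fallback for CPUs with neither REP_GOOD nor ERMS: unrolled 64-byte stores plus small tail loops. */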
	movq %rdi,%r10

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq %rcx,%rax

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
	jnz .Lbad_alignment
.Lafter_bad_alignment:

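	/* rcx = number of 64-byte blocks; the remainder is finished in .Lhandle_tail */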
	movq %rdx,%rcx
	shrq $6,%rcx
	jz .Lhandle_tail

	.p2align 4
.Lloop_64:
	decq %rcx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi
	jnz .Lloop_64

	/* Handle the tail in loops. The loops should be faster than
	   hard-to-predict jump tables. */
	.p2align 4
.Lhandle_tail:
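	/* First store the remaining whole qwords (count mod 64, in 8-byte steps), then the last count mod 8 bytes. */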
	movl %edx,%ecx
	andl $63&(~7),%ecx
	jz .Lhandle_7
	shrl $3,%ecx
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	andl $7,%edx
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %edx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax
	ret

.Lbad_alignment:
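	/*
	 * Destination is not 8-byte aligned. For counts of more than 7 bytes,
	 * do one unaligned qword store, then advance rdi to the next 8-byte
	 * boundary and shrink the count accordingly before retrying the
	 * aligned path.
	 */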
	cmpq $7,%rdx
	jbe .Lhandle_7
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)