// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,amd64,!gccgo,!appengine

#include "textflag.h"

DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
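
// AVX2_iv0/AVX2_iv1 above hold the eight BLAKE2b IV words (the SHA-512 IVs).
// AVX2_c40 and AVX2_c48 are VPSHUFB masks that rotate every 64-bit lane right
// by 24 and 16 bits (equivalently, left by 40 and 48, whence the names). The
// other two rotations of the G function need no mask: rotr 32 is done with
// VPSHUFD $-79 and rotr 63 with an add/shift/xor sequence.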

DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16

#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
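
// The VPERMQ instructions above are emitted as raw bytes, presumably because
// the assembler targeted by this file's build tag lacks the mnemonic.
// Immediate $0x39 rotates the four qwords of a Y register right by one lane,
// $0x93 rotates them left by one, and $0x4E swaps the two 128-bit halves.
// ROUND_AVX2 below performs one BLAKE2b round on the state held one row per
// register in Y0-Y3: a column G step, a diagonalizing lane rotation, a
// diagonal G step, and the inverse rotation.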

#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
	VPSHUFD $-79, Y3, Y3; \
	VPSHUFB c40, Y1, Y1;  \
	VPSHUFB c48, Y3, Y3;  \
	VPSHUFD $-79, Y3, Y3; \
	VPSHUFB c40, Y1, Y1;  \
	VPSHUFB c48, Y3, Y3

#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E

#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n

#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01

#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01

#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
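
// VMOVQ_SI_Xr(n) encodes VMOVQ n(SI), Xr and VPINSRQ_1_SI_Xr(n) encodes
// VPINSRQ $1, n(SI), Xr, Xr, with n baked in as an 8-bit displacement off the
// message pointer in SI. The _0 variants use the displacement-free [SI]
// addressing form instead, hence the "must not be 0" notes on the generic
// load macros below.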

// load msg: Y12 = (m[i0], m[i1], m[i2], m[i3])
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
	VMOVQ_SI_X12(i0*8);     \
	VMOVQ_SI_X11(i2*8);     \
	VPINSRQ_1_SI_X12(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y12, Y12

// load msg: Y13 = (m[i0], m[i1], m[i2], m[i3])
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
	VMOVQ_SI_X13(i0*8);     \
	VMOVQ_SI_X11(i2*8);     \
	VPINSRQ_1_SI_X13(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y13, Y13

// load msg: Y14 = (m[i0], m[i1], m[i2], m[i3])
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
	VMOVQ_SI_X14(i0*8);     \
	VMOVQ_SI_X11(i2*8);     \
	VPINSRQ_1_SI_X14(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y14, Y14

// load msg: Y15 = (m[i0], m[i1], m[i2], m[i3])
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
	VMOVQ_SI_X15(i0*8);     \
	VMOVQ_SI_X11(i2*8);     \
	VPINSRQ_1_SI_X15(i1*8); \
	VPINSRQ_1_SI_X11(i3*8); \
	VINSERTI128 $1, X11, Y15, Y15
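
// Each of the following macros loads one round's full message permutation:
// the sixteen qwords m[sigma[r][0]], ..., m[sigma[r][15]] gathered into
// Y12-Y15, four per register, with the permutation spelled out in the macro
// name. Rows whose first index is 0, or which contain adjacent or swapped
// pairs, fall back to the _0 variants, VMOVDQU, or VPSHUFD instead of the
// generic helpers above.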

#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
	VMOVQ_SI_X12_0;                   \
	VMOVQ_SI_X11(4*8);                \
	VPINSRQ_1_SI_X12(2*8);            \
	VPINSRQ_1_SI_X11(6*8);            \
	VINSERTI128 $1, X11, Y12, Y12;    \
	LOAD_MSG_AVX2_Y13(1, 3, 5, 7);    \
	LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
	LOAD_MSG_AVX2_Y15(9, 11, 13, 15)

#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
	LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
	LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
	VMOVQ_SI_X11(11*8);              \
	VPSHUFD $0x4E, 0*8(SI), X14;     \
	VPINSRQ_1_SI_X11(5*8);           \
	VINSERTI128 $1, X11, Y14, Y14;   \
	LOAD_MSG_AVX2_Y15(12, 2, 7, 3)

#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
	VMOVQ_SI_X11(5*8);              \
	VMOVDQU 11*8(SI), X12;          \
	VPINSRQ_1_SI_X11(15*8);         \
	VINSERTI128 $1, X11, Y12, Y12;  \
	VMOVQ_SI_X13(8*8);              \
	VMOVQ_SI_X11(2*8);              \
	VPINSRQ_1_SI_X13_0;             \
	VPINSRQ_1_SI_X11(13*8);         \
	VINSERTI128 $1, X11, Y13, Y13;  \
	LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
	LOAD_MSG_AVX2_Y15(14, 6, 1, 4)

#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
	LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
	LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
	LOAD_MSG_AVX2_Y14(2, 5, 4, 15);  \
	VMOVQ_SI_X15(6*8);               \
	VMOVQ_SI_X11_0;                  \
	VPINSRQ_1_SI_X15(10*8);          \
	VPINSRQ_1_SI_X11(8*8);           \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
	LOAD_MSG_AVX2_Y12(9, 5, 2, 10);  \
	VMOVQ_SI_X13_0;                  \
	VMOVQ_SI_X11(4*8);               \
	VPINSRQ_1_SI_X13(7*8);           \
	VPINSRQ_1_SI_X11(15*8);          \
	VINSERTI128 $1, X11, Y13, Y13;   \
	LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
	LOAD_MSG_AVX2_Y15(1, 12, 8, 13)

#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
	VMOVQ_SI_X12(2*8);                \
	VMOVQ_SI_X11_0;                   \
	VPINSRQ_1_SI_X12(6*8);            \
	VPINSRQ_1_SI_X11(8*8);            \
	VINSERTI128 $1, X11, Y12, Y12;    \
	LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
	LOAD_MSG_AVX2_Y14(4, 7, 15, 1);   \
	LOAD_MSG_AVX2_Y15(13, 5, 14, 9)

#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
	LOAD_MSG_AVX2_Y12(12, 1, 14, 4);  \
	LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
	VMOVQ_SI_X14_0;                   \
	VPSHUFD $0x4E, 8*8(SI), X11;      \
	VPINSRQ_1_SI_X14(6*8);            \
	VINSERTI128 $1, X11, Y14, Y14;    \
	LOAD_MSG_AVX2_Y15(7, 3, 2, 11)

#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
	LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
	LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
	LOAD_MSG_AVX2_Y14(5, 15, 8, 2);  \
	VMOVQ_SI_X15_0;                  \
	VMOVQ_SI_X11(6*8);               \
	VPINSRQ_1_SI_X15(4*8);           \
	VPINSRQ_1_SI_X11(10*8);          \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
	VMOVQ_SI_X12(6*8);              \
	VMOVQ_SI_X11(11*8);             \
	VPINSRQ_1_SI_X12(14*8);         \
	VPINSRQ_1_SI_X11_0;             \
	VINSERTI128 $1, X11, Y12, Y12;  \
	LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
	VMOVQ_SI_X11(1*8);              \
	VMOVDQU 12*8(SI), X14;          \
	VPINSRQ_1_SI_X11(10*8);         \
	VINSERTI128 $1, X11, Y14, Y14;  \
	VMOVQ_SI_X15(2*8);              \
	VMOVDQU 4*8(SI), X11;           \
	VPINSRQ_1_SI_X15(7*8);          \
	VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
	LOAD_MSG_AVX2_Y12(10, 8, 7, 1);  \
	VMOVQ_SI_X13(2*8);               \
	VPSHUFD $0x4E, 5*8(SI), X11;     \
	VPINSRQ_1_SI_X13(4*8);           \
	VINSERTI128 $1, X11, Y13, Y13;   \
	LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
	VMOVQ_SI_X15(11*8);              \
	VMOVQ_SI_X11(12*8);              \
	VPINSRQ_1_SI_X15(14*8);          \
	VPINSRQ_1_SI_X11_0;              \
	VINSERTI128 $1, X11, Y15, Y15

// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI
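
	// SI walks the message and DI counts the bytes left to hash. Y4/Y5
	// keep the rotation masks resident for the whole loop, and Y6/Y7 keep
	// iv0/iv1 for re-initializing the lower half of the state each block.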
	VMOVDQU ·AVX2_c40<>(SB), Y4
	VMOVDQU ·AVX2_c48<>(SB), Y5

	VMOVDQU ·AVX2_iv0<>(SB), Y6
	VMOVDQU ·AVX2_iv1<>(SB), Y7

	LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
	LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
	ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
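
	// Rounds 11 and 12 reuse the message layout of rounds 1 and 2
	// (BLAKE2b's sigma[10] and sigma[11] equal sigma[0] and sigma[1]),
	// replaying copies of those message vectors spilled to the stack.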
	ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
	ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)

#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE

#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
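
// SHUFFLE_AVX rearranges the state rows so that the diagonal step of G can
// run column-wise, and SHUFFLE_AVX_INV undoes that rearrangement afterwards.
// With each state row split across a pair of X registers, the lane moves are
// built from VPUNPCKLQDQ/VPUNPCKHQDQ pairs staged through X15.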

#define SHUFFLE_AVX() \
	VPUNPCKLQDQ_X13_X13_X15; \
	VPUNPCKHQDQ_X15_X7_X6;   \
	VPUNPCKLQDQ_X7_X7_X15;   \
	VPUNPCKHQDQ_X15_X13_X7;  \
	VPUNPCKLQDQ_X3_X3_X15;   \
	VPUNPCKHQDQ_X15_X2_X2;   \
	VPUNPCKLQDQ_X14_X14_X15; \
	VPUNPCKHQDQ_X15_X3_X3

#define SHUFFLE_AVX_INV() \
	VPUNPCKLQDQ_X2_X2_X15;   \
	VPUNPCKHQDQ_X15_X3_X2;   \
	VPUNPCKLQDQ_X3_X3_X15;   \
	VPUNPCKHQDQ_X15_X13_X3;  \
	VPUNPCKLQDQ_X7_X7_X15;   \
	VPUNPCKHQDQ_X15_X6_X6;   \
	VPUNPCKLQDQ_X14_X14_X15; \
	VPUNPCKHQDQ_X15_X7_X7
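
// HALF_ROUND_AVX applies the G function to all four columns at once, two
// 64-bit lanes per X register: v0/v1 hold the first state row, v2/v3 the
// second, and so on. The rotations follow the same pattern as the AVX2 path:
// rotr 32 via VPSHUFD, rotr 24 and 16 via VPSHUFB with c40/c48, and rotr 63
// via add (shift left 1 into t0), shift right 63, xor.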

#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
	VPSHUFD $-79, v6, v6; \
	VPSHUFD $-79, v7, v7; \
	VPSHUFB c40, v2, v2;  \
	VPSHUFB c40, v3, v3;  \
	VPSHUFB c48, v6, v6;  \
	VPSHUFB c48, v7, v7;  \
	VPSRLQ  $63, v2, v2;  \
	VPXOR   t0, v2, v2;   \
	VPADDQ  v3, v3, t0;   \
	VPSRLQ  $63, v3, v3;  \
	VPXOR   t0, v3, v3

// load msg: X12 = (m[i0], m[i1]), X13 = (m[i2], m[i3]), X14 = (m[i4], m[i5]), X15 = (m[i6], m[i7])
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
	VMOVQ_SI_X12(i0*8);     \
	VMOVQ_SI_X13(i2*8);     \
	VMOVQ_SI_X14(i4*8);     \
	VMOVQ_SI_X15(i6*8);     \
	VPINSRQ_1_SI_X12(i1*8); \
	VPINSRQ_1_SI_X13(i3*8); \
	VPINSRQ_1_SI_X14(i5*8); \
	VPINSRQ_1_SI_X15(i7*8)

// load msg: X12 = (m[0], m[2]), X13 = (m[4], m[6]), X14 = (m[1], m[3]), X15 = (m[5], m[7])
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
	VMOVQ_SI_X12_0;        \
	VMOVQ_SI_X13(4*8);     \
	VMOVQ_SI_X14(1*8);     \
	VMOVQ_SI_X15(5*8);     \
	VPINSRQ_1_SI_X12(2*8); \
	VPINSRQ_1_SI_X13(6*8); \
	VPINSRQ_1_SI_X14(3*8); \
	VPINSRQ_1_SI_X15(7*8)

// load msg: X12 = (m[1], m[0]), X13 = (m[11], m[5]), X14 = (m[12], m[2]), X15 = (m[7], m[3])
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
	VPSHUFD $0x4E, 0*8(SI), X12; \
	VMOVQ_SI_X13(11*8);          \
	VMOVQ_SI_X14(12*8);          \
	VMOVQ_SI_X15(7*8);           \
	VPINSRQ_1_SI_X13(5*8);       \
	VPINSRQ_1_SI_X14(2*8);       \
	VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (m[11], m[12]), X13 = (m[5], m[15]), X14 = (m[8], m[0]), X15 = (m[2], m[13])
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
	VMOVDQU 11*8(SI), X12;  \
	VMOVQ_SI_X13(5*8);      \
	VMOVQ_SI_X14(8*8);      \
	VMOVQ_SI_X15(2*8);      \
	VPINSRQ_1_SI_X13(15*8); \
	VPINSRQ_1_SI_X14_0;     \
	VPINSRQ_1_SI_X15(13*8)

// load msg: X12 = (m[2], m[5]), X13 = (m[4], m[15]), X14 = (m[6], m[10]), X15 = (m[0], m[8])
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
	VMOVQ_SI_X12(2*8);      \
	VMOVQ_SI_X13(4*8);      \
	VMOVQ_SI_X14(6*8);      \
	VMOVQ_SI_X15_0;         \
	VPINSRQ_1_SI_X12(5*8);  \
	VPINSRQ_1_SI_X13(15*8); \
	VPINSRQ_1_SI_X14(10*8); \
	VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (m[9], m[5]), X13 = (m[2], m[10]), X14 = (m[0], m[7]), X15 = (m[4], m[15])
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
	VMOVQ_SI_X12(9*8);      \
	VMOVQ_SI_X13(2*8);      \
	VMOVQ_SI_X14_0;         \
	VMOVQ_SI_X15(4*8);      \
	VPINSRQ_1_SI_X12(5*8);  \
	VPINSRQ_1_SI_X13(10*8); \
	VPINSRQ_1_SI_X14(7*8);  \
	VPINSRQ_1_SI_X15(15*8)

// load msg: X12 = (m[2], m[6]), X13 = (m[0], m[8]), X14 = (m[12], m[10]), X15 = (m[11], m[3])
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
	VMOVQ_SI_X12(2*8);      \
	VMOVQ_SI_X13_0;         \
	VMOVQ_SI_X14(12*8);     \
	VMOVQ_SI_X15(11*8);     \
	VPINSRQ_1_SI_X12(6*8);  \
	VPINSRQ_1_SI_X13(8*8);  \
	VPINSRQ_1_SI_X14(10*8); \
	VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (m[0], m[6]), X13 = (m[9], m[8]), X14 = (m[7], m[3]), X15 = (m[2], m[11])
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
	VMOVQ_SI_X12_0;              \
	VPSHUFD $0x4E, 8*8(SI), X13; \
	VMOVQ_SI_X14(7*8);           \
	VMOVQ_SI_X15(2*8);           \
	VPINSRQ_1_SI_X12(6*8);       \
	VPINSRQ_1_SI_X14(3*8);       \
	VPINSRQ_1_SI_X15(11*8)

// load msg: X12 = (m[6], m[14]), X13 = (m[11], m[0]), X14 = (m[15], m[9]), X15 = (m[3], m[8])
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
	MOVQ 6*8(SI), X12;      \
	MOVQ 11*8(SI), X13;     \
	MOVQ 15*8(SI), X14;     \
	MOVQ 3*8(SI), X15;      \
	VPINSRQ_1_SI_X12(14*8); \
	VPINSRQ_1_SI_X13_0;     \
	VPINSRQ_1_SI_X14(9*8);  \
	VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (m[5], m[15]), X13 = (m[8], m[2]), X14 = (m[0], m[4]), X15 = (m[6], m[10])
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
	MOVQ 5*8(SI), X12;      \
	MOVQ 8*8(SI), X13;      \
	MOVQ 0*8(SI), X14;      \
	MOVQ 6*8(SI), X15;      \
	VPINSRQ_1_SI_X12(15*8); \
	VPINSRQ_1_SI_X13(2*8);  \
	VPINSRQ_1_SI_X14(4*8);  \
	VPINSRQ_1_SI_X15(10*8)

// load msg: X12 = (m[12], m[13]), X13 = (m[1], m[10]), X14 = (m[2], m[7]), X15 = (m[4], m[5])
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
	VMOVDQU 12*8(SI), X12;  \
	MOVQ 1*8(SI), X13;      \
	MOVQ 2*8(SI), X14;      \
	VPINSRQ_1_SI_X13(10*8); \
	VPINSRQ_1_SI_X14(7*8);  \
	VMOVDQU 4*8(SI), X15

// load msg: X12 = (m[15], m[9]), X13 = (m[3], m[13]), X14 = (m[11], m[14]), X15 = (m[12], m[0])
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
	MOVQ 15*8(SI), X12;     \
	MOVQ 3*8(SI), X13;      \
	MOVQ 11*8(SI), X14;     \
	MOVQ 12*8(SI), X15;     \
	VPINSRQ_1_SI_X12(9*8);  \
	VPINSRQ_1_SI_X13(13*8); \
	VPINSRQ_1_SI_X14(14*8); \
	VPINSRQ_1_SI_X15_0

// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI

	VMOVDQU ·AVX_c40<>(SB), X0
	VMOVDQU ·AVX_c48<>(SB), X1
	VMOVDQA X0, X8
	VMOVDQA X1, X9

	VMOVDQU ·AVX_iv3<>(SB), X0
	VMOVDQA X0, 0(SP)
	XORQ    CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
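
	// CX carries the f0 finalization flag; the f1 (last node) word stays
	// zero, so only the low qword of AVX_iv3 is modified.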

	VMOVDQU ·AVX_iv0<>(SB), X4
	VMOVDQU ·AVX_iv1<>(SB), X5
	VMOVDQU ·AVX_iv2<>(SB), X6

	LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
	LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
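
	// Final two rounds: as in the AVX2 path, rounds 11 and 12 replay the
	// message vectors of rounds 1 and 2 from their stack copies.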
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
	HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)

// func supportsAVX2() bool
TEXT ·supportsAVX2(SB), 4, $0-1
	MOVQ runtime·support_avx2(SB), AX
	MOVB AX, ret+0(FP)
	RET

// func supportsAVX() bool
TEXT ·supportsAVX(SB), 4, $0-1
	MOVQ runtime·support_avx(SB), AX
	MOVB AX, ret+0(FP)
	RET
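
// Both helpers read the Go runtime's internal CPU feature flags, which exist
// in the runtimes this file's build tag targets; newer code would consult
// golang.org/x/sys/cpu instead.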