1 // Copyright ©2015 The Gonum Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Some of the loop unrolling code is copied from:
6 // http://golang.org/src/math/big/arith_amd64.s
7 // which is distributed under these terms:
9 // Copyright (c) 2012 The Go Authors. All rights reserved.
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
15 // * Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 // * Redistributions in binary form must reproduce the above
18 // copyright notice, this list of conditions and the following disclaimer
19 // in the documentation and/or other materials provided with the
21 // * Neither the name of Google Inc. nor the names of its
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 // +build !noasm,!appengine
50 // func AxpyUnitary(alpha float64, x, y []float64)
51 TEXT ·AxpyUnitary(SB), NOSPLIT, $0
52 MOVQ x_base+8(FP), X_PTR // X_PTR := &x
53 MOVQ y_base+32(FP), Y_PTR // Y_PTR := &y
54 MOVQ x_len+16(FP), LEN // LEN = min( len(x), len(y) )
55 CMPQ y_len+40(FP), LEN
56 CMOVQLE y_len+40(FP), LEN
57 CMPQ LEN, $0 // if LEN == 0 { return }
60 MOVSD alpha+0(FP), ALPHA // ALPHA := { alpha, alpha }
61 SHUFPD $0, ALPHA, ALPHA
62 MOVUPS ALPHA, ALPHA_2 // ALPHA_2 := ALPHA for pipelining
63 MOVQ Y_PTR, TAIL // Check memory alignment
64 ANDQ $15, TAIL // TAIL = &y % 16
65 JZ no_trim // if TAIL == 0 { goto no_trim }
67 // Align on 16-byte boundary
68 MOVSD (X_PTR), X2 // X2 := x[0]
69 MULSD ALPHA, X2 // X2 *= a
70 ADDSD (Y_PTR), X2 // X2 += y[0]
71 MOVSD X2, (DST_PTR) // y[0] = X2
74 JZ end // if LEN == 0 { return }
78 ANDQ $7, TAIL // TAIL := n % 8
79 SHRQ $3, LEN // LEN = floor( n / 8 )
80 JZ tail_start // if LEN == 0 { goto tail2_start }
83 // y[i] += alpha * x[i] unrolled 8x.
84 MOVUPS (X_PTR)(IDX*8), X2 // X_i = x[i]
85 MOVUPS 16(X_PTR)(IDX*8), X3
86 MOVUPS 32(X_PTR)(IDX*8), X4
87 MOVUPS 48(X_PTR)(IDX*8), X5
89 MULPD ALPHA, X2 // X_i *= a
94 ADDPD (Y_PTR)(IDX*8), X2 // X_i += y[i]
95 ADDPD 16(Y_PTR)(IDX*8), X3
96 ADDPD 32(Y_PTR)(IDX*8), X4
97 ADDPD 48(Y_PTR)(IDX*8), X5
99 MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X_i
100 MOVUPS X3, 16(DST_PTR)(IDX*8)
101 MOVUPS X4, 32(DST_PTR)(IDX*8)
102 MOVUPS X5, 48(DST_PTR)(IDX*8)
104 ADDQ $8, IDX // i += 8
106 JNZ loop // } while --LEN > 0
107 CMPQ TAIL, $0 // if TAIL == 0 { return }
110 tail_start: // Reset loop registers
111 MOVQ TAIL, LEN // Loop counter: LEN = TAIL
112 SHRQ $1, LEN // LEN = floor( TAIL / 2 )
113 JZ tail_one // if TAIL == 0 { goto tail }
116 MOVUPS (X_PTR)(IDX*8), X2 // X2 = x[i]
117 MULPD ALPHA, X2 // X2 *= a
118 ADDPD (Y_PTR)(IDX*8), X2 // X2 += y[i]
119 MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2
120 ADDQ $2, IDX // i += 2
122 JNZ tail_two // } while --LEN > 0
125 JZ end // if TAIL == 0 { goto end }
128 MOVSD (X_PTR)(IDX*8), X2 // X2 = x[i]
129 MULSD ALPHA, X2 // X2 *= a
130 ADDSD (Y_PTR)(IDX*8), X2 // X2 += y[i]
131 MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2