/* Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Code: Performs a shift right long
 *
 * Description: Perform a binary shift operation using two source registers
 *              where one is the shift amount and the other is the value to
 *              shift. Store the result in a destination register.
 *
 * Format: AA|op CC|BB (23x)
 *
 * Syntax: op vAA, vBB, vCC
 */
32 FETCH_BB 1, %edx # %edx<- BB
33 FETCH_CC 1, %eax # %eax<- CC
34 movq (rFP, %edx, 4), %xmm1 # %xmm1<- vBB
35 movss (rFP, %eax, 4), %xmm0 # %xmm0<- vCC
36 movq .LshiftMask, %xmm2
37 pand %xmm2, %xmm0 # %xmm0<- masked for the shift bits
38 psrlq %xmm0, %xmm1 # %xmm1<- shifted vBB
39 cmpl $$0, 4(rFP, %edx, 4) # check if we need to consider sign
40 jl .L${opcode}_finish # consider sign
41 jmp .L${opcode}_final # sign is fine, finish
45 movq .Lvalue64, %xmm3 # %xmm3<- 64
46 psubq %xmm0, %xmm3 # %xmm3<- 64 - shift amount
47 movq .L64bits, %xmm4 # %xmm4<- lower 64 bits set
48 psllq %xmm3, %xmm4 # %xmm4<- correct mask for sign bits
49 por %xmm4, %xmm1 # %xmm1<- signed and shifted vBB
52 movq %xmm1, (rFP, rINST, 4) # vAA<- shifted vBB
53 FINISH 2 # jump to next instruction