2 * Copyright (C) 2009 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "libdex/OpCode.h"
19 #include "dexdump/OpCodeNames.h"
21 #include "../../CompilerInternals.h"
23 #include <unistd.h> /* for cacheflush */
26 * opcode: ArmOpCode enum
27 * skeleton: pre-designated bit-pattern for this opcode
28 * k0: key to applying ds/de
29 * ds: dest start bit position
30 * de: dest end bit position
31 * k1: key to applying s1s/s1e
32 * s1s: src1 start bit position
33 * s1e: src1 end bit position
34 * k2: key to applying s2s/s2e
35 * s2s: src2 start bit position
36 * s2e: src2 end bit position
37 * operands: number of operands (for sanity check purposes)
39 * fmt: for pretty-printing
41 #define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
42 k3, k3s, k3e, flags, name, fmt, size) \
43 {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
44 {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
46 /* Instruction dump string format keys: !pf, where "!" is the start
47 * of the key, "p" is which numeric operand to use and "f" is the
51 * 0 -> operands[0] (dest)
52 * 1 -> operands[1] (src1)
53 * 2 -> operands[2] (src2)
54 * 3 -> operands[3] (extra)
61 * c -> branch condition (beq, bne, etc.)
62 * t -> pc-relative target
63 * u -> 1st half of bl[x] target
64 * v -> 2nd half of bl[x] target
66 * s -> single precision floating point register
67 * S -> double precision floating point register
68 * m -> Thumb2 modified immediate
69 * n -> complemented Thumb2 modified immediate
70 * M -> Thumb2 16-bit zero-extended immediate
73 * [!] escape. To insert "!", use "!!"
75 /* NOTE: must be kept in sync with enum ArmOpcode from ArmLIR.h */
76 ArmEncodingMap EncodingMap[kArmLast] = {
77 ENCODING_MAP(kArm16BitData, 0x0000,
78 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
79 kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 1),
80 ENCODING_MAP(kThumbAdcRR, 0x4140,
81 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
83 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES | USES_CCODES,
84 "adcs", "r!0d, r!1d", 1),
85 ENCODING_MAP(kThumbAddRRI3, 0x1c00,
86 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
88 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
89 "adds", "r!0d, r!1d, #!2d", 1),
90 ENCODING_MAP(kThumbAddRI8, 0x3000,
91 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
93 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
94 "adds", "r!0d, r!0d, #!1d", 1),
95 ENCODING_MAP(kThumbAddRRR, 0x1800,
96 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
98 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
99 "adds", "r!0d, r!1d, r!2d", 1),
100 ENCODING_MAP(kThumbAddRRLH, 0x4440,
101 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
102 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
103 "add", "r!0d, r!1d", 1),
104 ENCODING_MAP(kThumbAddRRHL, 0x4480,
105 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
106 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
107 "add", "r!0d, r!1d", 1),
108 ENCODING_MAP(kThumbAddRRHH, 0x44c0,
109 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
110 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
111 "add", "r!0d, r!1d", 1),
112 ENCODING_MAP(kThumbAddPcRel, 0xa000,
113 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
114 kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH,
115 "add", "r!0d, pc, #!1E", 1),
116 ENCODING_MAP(kThumbAddSpRel, 0xa800,
117 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
118 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
119 "add", "r!0d, sp, #!2E", 1),
120 ENCODING_MAP(kThumbAddSpI7, 0xb000,
121 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
122 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
123 "add", "sp, #!0d*4", 1),
124 ENCODING_MAP(kThumbAndRR, 0x4000,
125 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
127 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
128 "ands", "r!0d, r!1d", 1),
129 ENCODING_MAP(kThumbAsrRRI5, 0x1000,
130 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
132 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
133 "asrs", "r!0d, r!1d, #!2d", 1),
134 ENCODING_MAP(kThumbAsrRR, 0x4100,
135 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
137 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
138 "asrs", "r!0d, r!1d", 1),
139 ENCODING_MAP(kThumbBCond, 0xd000,
140 kFmtBitBlt, 7, 0, kFmtBitBlt, 11, 8, kFmtUnused, -1, -1,
141 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | USES_CCODES,
143 ENCODING_MAP(kThumbBUncond, 0xe000,
144 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
145 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
147 ENCODING_MAP(kThumbBicRR, 0x4380,
148 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
150 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
151 "bics", "r!0d, r!1d", 1),
152 ENCODING_MAP(kThumbBkpt, 0xbe00,
153 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
154 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
156 ENCODING_MAP(kThumbBlx1, 0xf000,
157 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
158 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR,
160 ENCODING_MAP(kThumbBlx2, 0xe800,
161 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
162 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR,
164 ENCODING_MAP(kThumbBl1, 0xf000,
165 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
166 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
168 ENCODING_MAP(kThumbBl2, 0xf800,
169 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
170 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
172 ENCODING_MAP(kThumbBlxR, 0x4780,
173 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
175 IS_UNARY_OP | REG_USE0 | IS_BRANCH | REG_DEF_LR,
177 ENCODING_MAP(kThumbBx, 0x4700,
178 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
179 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
181 ENCODING_MAP(kThumbCmnRR, 0x42c0,
182 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
183 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
184 "cmn", "r!0d, r!1d", 1),
185 ENCODING_MAP(kThumbCmpRI8, 0x2800,
186 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
187 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | SETS_CCODES,
188 "cmp", "r!0d, #!1d", 1),
189 ENCODING_MAP(kThumbCmpRR, 0x4280,
190 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
191 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
192 "cmp", "r!0d, r!1d", 1),
193 ENCODING_MAP(kThumbCmpLH, 0x4540,
194 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
195 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
196 "cmp", "r!0d, r!1d", 1),
197 ENCODING_MAP(kThumbCmpHL, 0x4580,
198 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
199 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
200 "cmp", "r!0d, r!1d", 1),
201 ENCODING_MAP(kThumbCmpHH, 0x45c0,
202 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
203 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
204 "cmp", "r!0d, r!1d", 1),
205 ENCODING_MAP(kThumbEorRR, 0x4040,
206 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
208 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
209 "eors", "r!0d, r!1d", 1),
210 ENCODING_MAP(kThumbLdmia, 0xc800,
211 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
213 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
214 "ldmia", "r!0d!!, <!1R>", 1),
215 ENCODING_MAP(kThumbLdrRRI5, 0x6800,
216 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
217 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
218 "ldr", "r!0d, [r!1d, #!2E]", 1),
219 ENCODING_MAP(kThumbLdrRRR, 0x5800,
220 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
221 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
222 "ldr", "r!0d, [r!1d, r!2d]", 1),
223 ENCODING_MAP(kThumbLdrPcRel, 0x4800,
224 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
225 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
226 | IS_LOAD, "ldr", "r!0d, [pc, #!1E]", 1),
227 ENCODING_MAP(kThumbLdrSpRel, 0x9800,
228 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
229 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
230 | IS_LOAD, "ldr", "r!0d, [sp, #!2E]", 1),
231 ENCODING_MAP(kThumbLdrbRRI5, 0x7800,
232 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
233 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
234 "ldrb", "r!0d, [r!1d, #2d]", 1),
235 ENCODING_MAP(kThumbLdrbRRR, 0x5c00,
236 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
237 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
238 "ldrb", "r!0d, [r!1d, r!2d]", 1),
239 ENCODING_MAP(kThumbLdrhRRI5, 0x8800,
240 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
241 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
242 "ldrh", "r!0d, [r!1d, #!2F]", 1),
243 ENCODING_MAP(kThumbLdrhRRR, 0x5a00,
244 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
245 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
246 "ldrh", "r!0d, [r!1d, r!2d]", 1),
247 ENCODING_MAP(kThumbLdrsbRRR, 0x5600,
248 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
249 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
250 "ldrsb", "r!0d, [r!1d, r!2d]", 1),
251 ENCODING_MAP(kThumbLdrshRRR, 0x5e00,
252 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
253 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
254 "ldrsh", "r!0d, [r!1d, r!2d]", 1),
255 ENCODING_MAP(kThumbLslRRI5, 0x0000,
256 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
258 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
259 "lsls", "r!0d, r!1d, #!2d", 1),
260 ENCODING_MAP(kThumbLslRR, 0x4080,
261 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
263 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
264 "lsls", "r!0d, r!1d", 1),
265 ENCODING_MAP(kThumbLsrRRI5, 0x0800,
266 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
268 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
269 "lsrs", "r!0d, r!1d, #!2d", 1),
270 ENCODING_MAP(kThumbLsrRR, 0x40c0,
271 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
273 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
274 "lsrs", "r!0d, r!1d", 1),
275 ENCODING_MAP(kThumbMovImm, 0x2000,
276 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
278 IS_BINARY_OP | REG_DEF0 | SETS_CCODES,
279 "movs", "r!0d, #!1d", 1),
280 ENCODING_MAP(kThumbMovRR, 0x1c00,
281 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
283 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
284 "movs", "r!0d, r!1d", 1),
285 ENCODING_MAP(kThumbMovRR_H2H, 0x46c0,
286 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
287 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
288 "mov", "r!0d, r!1d", 1),
289 ENCODING_MAP(kThumbMovRR_H2L, 0x4640,
290 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
291 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
292 "mov", "r!0d, r!1d", 1),
293 ENCODING_MAP(kThumbMovRR_L2H, 0x4680,
294 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
295 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
296 "mov", "r!0d, r!1d", 1),
297 ENCODING_MAP(kThumbMul, 0x4340,
298 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
300 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
301 "muls", "r!0d, r!1d", 1),
302 ENCODING_MAP(kThumbMvn, 0x43c0,
303 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
305 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
306 "mvns", "r!0d, r!1d", 1),
307 ENCODING_MAP(kThumbNeg, 0x4240,
308 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
310 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
311 "negs", "r!0d, r!1d", 1),
312 ENCODING_MAP(kThumbOrr, 0x4300,
313 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
315 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
316 "orrs", "r!0d, r!1d", 1),
317 ENCODING_MAP(kThumbPop, 0xbc00,
318 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
320 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
321 | IS_LOAD, "pop", "<!0R>", 1),
322 ENCODING_MAP(kThumbPush, 0xb400,
323 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
325 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
326 | IS_STORE, "push", "<!0R>", 1),
327 ENCODING_MAP(kThumbRorRR, 0x41c0,
328 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
330 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
331 "rors", "r!0d, r!1d", 1),
332 ENCODING_MAP(kThumbSbc, 0x4180,
333 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
335 IS_BINARY_OP | REG_DEF0_USE01 | USES_CCODES | SETS_CCODES,
336 "sbcs", "r!0d, r!1d", 1),
337 ENCODING_MAP(kThumbStmia, 0xc000,
338 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
340 IS_BINARY_OP | REG_DEF0 | REG_USE0 | REG_USE_LIST1 | IS_STORE,
341 "stmia", "r!0d!!, <!1R>", 1),
342 ENCODING_MAP(kThumbStrRRI5, 0x6000,
343 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
344 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
345 "str", "r!0d, [r!1d, #!2E]", 1),
346 ENCODING_MAP(kThumbStrRRR, 0x5000,
347 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
348 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
349 "str", "r!0d, [r!1d, r!2d]", 1),
350 ENCODING_MAP(kThumbStrSpRel, 0x9000,
351 kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
352 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
353 | IS_STORE, "str", "r!0d, [sp, #!2E]", 1),
354 ENCODING_MAP(kThumbStrbRRI5, 0x7000,
355 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
356 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
357 "strb", "r!0d, [r!1d, #!2d]", 1),
358 ENCODING_MAP(kThumbStrbRRR, 0x5400,
359 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
360 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
361 "strb", "r!0d, [r!1d, r!2d]", 1),
362 ENCODING_MAP(kThumbStrhRRI5, 0x8000,
363 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
364 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
365 "strh", "r!0d, [r!1d, #!2F]", 1),
366 ENCODING_MAP(kThumbStrhRRR, 0x5200,
367 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
368 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
369 "strh", "r!0d, [r!1d, r!2d]", 1),
370 ENCODING_MAP(kThumbSubRRI3, 0x1e00,
371 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
373 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
374 "subs", "r!0d, r!1d, #!2d]", 1),
375 ENCODING_MAP(kThumbSubRI8, 0x3800,
376 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
378 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
379 "subs", "r!0d, #!1d", 1),
380 ENCODING_MAP(kThumbSubRRR, 0x1a00,
381 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
383 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
384 "subs", "r!0d, r!1d, r!2d", 1),
385 ENCODING_MAP(kThumbSubSpI7, 0xb080,
386 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
388 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
389 "sub", "sp, #!0d", 1),
390 ENCODING_MAP(kThumbSwi, 0xdf00,
391 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
393 ENCODING_MAP(kThumbTst, 0x4200,
394 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
395 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE01 | SETS_CCODES,
396 "tst", "r!0d, r!1d", 1),
397 ENCODING_MAP(kThumb2Vldrs, 0xed900a00,
398 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
399 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
400 "vldr", "!0s, [r!1d, #!2E]", 2),
401 ENCODING_MAP(kThumb2Vldrd, 0xed900b00,
402 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
403 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
404 "vldr", "!0S, [r!1d, #!2E]", 2),
405 ENCODING_MAP(kThumb2Vmuls, 0xee200a00,
406 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
408 IS_TERTIARY_OP | REG_DEF0_USE12,
409 "vmuls", "!0s, !1s, !2s", 2),
410 ENCODING_MAP(kThumb2Vmuld, 0xee200b00,
411 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
412 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
413 "vmuld", "!0S, !1S, !2S", 2),
414 ENCODING_MAP(kThumb2Vstrs, 0xed800a00,
415 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
416 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
417 "vstr", "!0s, [r!1d, #!2E]", 2),
418 ENCODING_MAP(kThumb2Vstrd, 0xed800b00,
419 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
420 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
421 "vstr", "!0S, [r!1d, #!2E]", 2),
422 ENCODING_MAP(kThumb2Vsubs, 0xee300a40,
423 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
424 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
425 "vsub", "!0s, !1s, !2s", 2),
426 ENCODING_MAP(kThumb2Vsubd, 0xee300b40,
427 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
428 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
429 "vsub", "!0S, !1S, !2S", 2),
430 ENCODING_MAP(kThumb2Vadds, 0xee300a00,
431 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
432 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
433 "vadd", "!0s, !1s, !2s", 2),
434 ENCODING_MAP(kThumb2Vaddd, 0xee300b00,
435 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
436 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
437 "vadd", "!0S, !1S, !2S", 2),
438 ENCODING_MAP(kThumb2Vdivs, 0xee800a00,
439 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
440 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
441 "vdivs", "!0s, !1s, !2s", 2),
442 ENCODING_MAP(kThumb2Vdivd, 0xee800b00,
443 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
444 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
445 "vdivd", "!0S, !1S, !2S", 2),
446 ENCODING_MAP(kThumb2VcvtIF, 0xeeb80ac0,
447 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
448 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
449 "vcvt.f32", "!0s, !1s", 2),
450 ENCODING_MAP(kThumb2VcvtID, 0xeeb80bc0,
451 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
452 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
453 "vcvt.f64", "!0S, !1s", 2),
454 ENCODING_MAP(kThumb2VcvtFI, 0xeebd0ac0,
455 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
456 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
457 "vcvt.s32.f32 ", "!0s, !1s", 2),
458 ENCODING_MAP(kThumb2VcvtDI, 0xeebd0bc0,
459 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
460 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
461 "vcvt.s32.f64 ", "!0s, !1S", 2),
462 ENCODING_MAP(kThumb2VcvtFd, 0xeeb70ac0,
463 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
464 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
465 "vcvt.f64.f32 ", "!0S, !1s", 2),
466 ENCODING_MAP(kThumb2VcvtDF, 0xeeb70bc0,
467 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
468 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
469 "vcvt.f32.f64 ", "!0s, !1S", 2),
470 ENCODING_MAP(kThumb2Vsqrts, 0xeeb10ac0,
471 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
472 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
473 "vsqrt.f32 ", "!0s, !1s", 2),
474 ENCODING_MAP(kThumb2Vsqrtd, 0xeeb10bc0,
475 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
476 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
477 "vsqrt.f64 ", "!0S, !1S", 2),
478 ENCODING_MAP(kThumb2MovImmShift, 0xf04f0000, /* no setflags encoding */
479 kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
480 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
481 "mov", "r!0d, #!1m", 2),
482 ENCODING_MAP(kThumb2MovImm16, 0xf2400000,
483 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
484 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
485 "mov", "r!0d, #!1M", 2),
486 ENCODING_MAP(kThumb2StrRRI12, 0xf8c00000,
487 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
488 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
489 "str", "r!0d,[r!1d, #!2d", 2),
490 ENCODING_MAP(kThumb2LdrRRI12, 0xf8d00000,
491 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
492 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
493 "ldr", "r!0d,[r!1d, #!2d", 2),
494 ENCODING_MAP(kThumb2StrRRI8Predec, 0xf8400c00,
495 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
496 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
497 "str", "r!0d,[r!1d, #-!2d]", 2),
498 ENCODING_MAP(kThumb2LdrRRI8Predec, 0xf8500c00,
499 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
500 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
501 "ldr", "r!0d,[r!1d, #-!2d]", 2),
502 ENCODING_MAP(kThumb2Cbnz, 0xb900, /* Note: does not affect flags */
503 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
504 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH,
505 "cbnz", "r!0d,!1t", 1),
506 ENCODING_MAP(kThumb2Cbz, 0xb100, /* Note: does not affect flags */
507 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
508 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH,
509 "cbz", "r!0d,!1t", 1),
510 ENCODING_MAP(kThumb2AddRRI12, 0xf2000000,
511 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
513 IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
514 "add", "r!0d,r!1d,#!2d", 2),
515 ENCODING_MAP(kThumb2MovRR, 0xea4f0000, /* no setflags encoding */
516 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
517 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
518 "mov", "r!0d, r!1d", 2),
519 ENCODING_MAP(kThumb2Vmovs, 0xeeb00a40,
520 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
521 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
522 "vmov.f32 ", " !0s, !1s", 2),
523 ENCODING_MAP(kThumb2Vmovd, 0xeeb00b40,
524 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
525 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
526 "vmov.f64 ", " !0S, !1S", 2),
527 ENCODING_MAP(kThumb2Ldmia, 0xe8900000,
528 kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
530 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
531 "ldmia", "r!0d!!, <!1R>", 2),
532 ENCODING_MAP(kThumb2Stmia, 0xe8800000,
533 kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
535 IS_BINARY_OP | REG_DEF0_USE0 | REG_USE_LIST1 | IS_STORE,
536 "stmia", "r!0d!!, <!1R>", 2),
537 ENCODING_MAP(kThumb2AddRRR, 0xeb100000, /* setflags encoding */
538 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
540 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
541 "adds", "r!0d, r!1d, r!2d", 2),
542 ENCODING_MAP(kThumb2SubRRR, 0xebb00000, /* setflags enconding */
543 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
545 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
546 "subs", "r!0d, r!1d, r!2d", 2),
547 ENCODING_MAP(kThumb2SbcRRR, 0xeb700000, /* setflags encoding */
548 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
550 IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES | SETS_CCODES,
551 "sbcs", "r!0d, r!1d, r!2d", 2),
552 ENCODING_MAP(kThumb2CmpRR, 0xebb00f00,
553 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
555 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
556 "cmp", "r!0d, r!1d", 2),
557 ENCODING_MAP(kThumb2SubRRI12, 0xf2a00000,
558 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
560 IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
561 "sub", "r!0d,r!1d,#!2d", 2),
562 ENCODING_MAP(kThumb2MvnImmShift, 0xf06f0000, /* no setflags encoding */
563 kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
564 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
565 "mvn", "r!0d, #!1n", 2),
566 ENCODING_MAP(kThumb2Sel, 0xfaa0f080,
567 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
569 IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
570 "sel", "r!0d, r!1d, r!2d", 2),
571 ENCODING_MAP(kThumb2Ubfx, 0xf3c00000,
572 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
573 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
574 "ubfx", "r!0d, r!1d, #!2d, #!3d", 2),
575 ENCODING_MAP(kThumb2Sbfx, 0xf3400000,
576 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
577 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
578 "sbfx", "r!0d, r!1d, #!2d, #!3d", 2),
579 ENCODING_MAP(kThumb2LdrRRR, 0xf8500000,
580 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
581 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
582 "ldr", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
583 ENCODING_MAP(kThumb2LdrhRRR, 0xf8300000,
584 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
585 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
586 "ldrh", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
587 ENCODING_MAP(kThumb2LdrshRRR, 0xf9300000,
588 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
589 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
590 "ldrsh", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
591 ENCODING_MAP(kThumb2LdrbRRR, 0xf8100000,
592 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
593 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
594 "ldrb", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
595 ENCODING_MAP(kThumb2LdrsbRRR, 0xf9100000,
596 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
597 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
598 "ldrsb", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
599 ENCODING_MAP(kThumb2StrRRR, 0xf8400000,
600 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
601 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
602 "str", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
603 ENCODING_MAP(kThumb2StrhRRR, 0xf8200000,
604 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
605 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
606 "strh", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
607 ENCODING_MAP(kThumb2StrbRRR, 0xf8000000,
608 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
609 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
610 "strb", "r!0d,[r!1d, r!2d, LSL #!3d]", 2),
611 ENCODING_MAP(kThumb2LdrhRRI12, 0xf8b00000,
612 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
613 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
614 "ldrh", "r!0d,[r!1d, #!2d]", 2),
615 ENCODING_MAP(kThumb2LdrshRRI12, 0xf9b00000,
616 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
617 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
618 "ldrsh", "r!0d,[r!1d, #!2d]", 2),
619 ENCODING_MAP(kThumb2LdrbRRI12, 0xf8900000,
620 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
621 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
622 "ldrb", "r!0d,[r!1d, #!2d]", 2),
623 ENCODING_MAP(kThumb2LdrsbRRI12, 0xf9900000,
624 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
625 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
626 "ldrsb", "r!0d,[r!1d, #!2d]", 2),
627 ENCODING_MAP(kThumb2StrhRRI12, 0xf8a00000,
628 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
629 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
630 "strh", "r!0d,[r!1d, #!2d]", 2),
631 ENCODING_MAP(kThumb2StrbRRI12, 0xf8800000,
632 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
633 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
634 "strb", "r!0d,[r!1d, #!2d]", 2),
635 ENCODING_MAP(kThumb2Pop, 0xe8bd0000,
636 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
638 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
639 | IS_LOAD, "pop", "<!0R>", 2),
640 ENCODING_MAP(kThumb2Push, 0xe8ad0000,
641 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
643 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
644 | IS_STORE, "push", "<!0R>", 2),
645 ENCODING_MAP(kThumb2CmpRI8, 0xf1b00f00,
646 kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
648 IS_BINARY_OP | REG_USE0 | SETS_CCODES,
649 "cmp", "r!0d, #!1m", 2),
650 ENCODING_MAP(kThumb2AdcRRR, 0xeb500000, /* setflags encoding */
651 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
653 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
654 "acds", "r!0d, r!1d, r!2d, shift !3d", 2),
655 ENCODING_MAP(kThumb2AndRRR, 0xea000000,
656 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
657 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
658 "and", "r!0d, r!1d, r!2d, shift !3d", 2),
659 ENCODING_MAP(kThumb2BicRRR, 0xea200000,
660 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
661 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
662 "bic", "r!0d, r!1d, r!2d, shift !3d", 2),
663 ENCODING_MAP(kThumb2CmnRR, 0xeb000000,
664 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
666 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
667 "cmn", "r!0d, r!1d, shift !2d", 2),
668 ENCODING_MAP(kThumb2EorRRR, 0xea800000,
669 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
670 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
671 "eor", "r!0d, r!1d, r!2d, shift !3d", 2),
672 ENCODING_MAP(kThumb2MulRRR, 0xfb00f000,
673 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
674 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
675 "mul", "r!0d, r!1d, r!2d", 2),
676 ENCODING_MAP(kThumb2MnvRR, 0xea6f0000,
677 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
678 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
679 "mvn", "r!0d, r!1d, shift !2d", 2),
680 ENCODING_MAP(kThumb2RsubRRI8, 0xf1d00000,
681 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
683 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
684 "rsb", "r!0d,r!1d,#!2m", 2),
685 ENCODING_MAP(kThumb2NegRR, 0xf1d00000, /* instance of rsub */
686 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtUnused, -1, -1,
688 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
689 "neg", "r!0d,r!1d", 2),
690 ENCODING_MAP(kThumb2OrrRRR, 0xea400000,
691 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
692 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
693 "orr", "r!0d, r!1d, r!2d, shift !3d", 2),
694 ENCODING_MAP(kThumb2TstRR, 0xea100f00,
695 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
697 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
698 "tst", "r!0d, r!1d, shift !2d", 2),
699 ENCODING_MAP(kThumb2LslRRR, 0xfa00f000,
700 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
701 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
702 "lsl", "r!0d, r!1d, r!2d", 2),
703 ENCODING_MAP(kThumb2LsrRRR, 0xfa20f000,
704 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
705 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
706 "lsr", "r!0d, r!1d, r!2d", 2),
707 ENCODING_MAP(kThumb2AsrRRR, 0xfa40f000,
708 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
709 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
710 "asr", "r!0d, r!1d, r!2d", 2),
711 ENCODING_MAP(kThumb2RorRRR, 0xfa60f000,
712 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
713 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
714 "ror", "r!0d, r!1d, r!2d", 2),
715 ENCODING_MAP(kThumb2LslRRI5, 0xea4f0000,
716 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
717 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
718 "lsl", "r!0d, r!1d, #!2d", 2),
719 ENCODING_MAP(kThumb2LsrRRI5, 0xea4f0010,
720 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
721 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
722 "lsr", "r!0d, r!1d, #!2d", 2),
723 ENCODING_MAP(kThumb2AsrRRI5, 0xea4f0020,
724 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
725 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
726 "asr", "r!0d, r!1d, #!2d", 2),
727 ENCODING_MAP(kThumb2RorRRI5, 0xea4f0030,
728 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
729 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
730 "ror", "r!0d, r!1d, #!2d", 2),
731 ENCODING_MAP(kThumb2BicRRI8, 0xf0200000,
732 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
733 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
734 "bic", "r!0d, r!1d, #!2m", 2),
735 ENCODING_MAP(kThumb2AndRRI8, 0xf0000000,
736 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
737 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
738 "and", "r!0d, r!1d, #!2m", 2),
739 ENCODING_MAP(kThumb2OrrRRI8, 0xf0400000,
740 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
741 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
742 "orr", "r!0d, r!1d, #!2m", 2),
743 ENCODING_MAP(kThumb2EorRRI8, 0xf0800000,
744 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
745 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
746 "eor", "r!0d, r!1d, #!2m", 2),
747 ENCODING_MAP(kThumb2AddRRI8, 0xf1100000,
748 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
750 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
751 "adds", "r!0d, r!1d, #!2m", 2),
752 ENCODING_MAP(kThumb2AdcRRI8, 0xf1500000,
753 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
755 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
756 "adcs", "r!0d, r!1d, #!2m", 2),
757 ENCODING_MAP(kThumb2SubRRI8, 0xf1b00000,
758 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
760 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
761 "subs", "r!0d, r!1d, #!2m", 2),
762 ENCODING_MAP(kThumb2SbcRRI8, 0xf1700000,
763 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
765 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
766 "sbcs", "r!0d, r!1d, #!2m", 2),
767 ENCODING_MAP(kThumb2It, 0xbf00,
768 kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
769 kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
771 ENCODING_MAP(kThumb2Fmstat, 0xeef1fa10,
772 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
773 kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
775 ENCODING_MAP(kThumb2Vcmpd, 0xeeb40b40,
776 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
777 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
778 "vcmp.f64", "!0S, !1S", 2),
779 ENCODING_MAP(kThumb2Vcmps, 0xeeb40a40,
780 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
781 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
782 "vcmp.f32", "!0s, !1s", 2),
783 ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
784 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
786 IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
787 "ldr", "r!0d,[rpc, #!1d]", 2),
788 ENCODING_MAP(kThumb2BCond, 0xf0008000,
789 kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
791 IS_BINARY_OP | IS_BRANCH | USES_CCODES,
793 ENCODING_MAP(kThumb2Vmovd_RR, 0xeeb00b40,
794 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
795 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
796 "vmov.f64", "!0S, !1S", 2),
797 ENCODING_MAP(kThumb2Vmovs_RR, 0xeeb00a40,
798 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
799 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
800 "vmov.f32", "!0s, !1s", 2),
801 ENCODING_MAP(kThumb2Fmrs, 0xee100a10,
802 kFmtBitBlt, 15, 12, kFmtSfp, 7, 16, kFmtUnused, -1, -1,
803 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
804 "fmrs", "r!0d, !1s", 2),
805 ENCODING_MAP(kThumb2Fmsr, 0xee000a10,
806 kFmtSfp, 7, 16, kFmtBitBlt, 15, 12, kFmtUnused, -1, -1,
807 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
808 "fmsr", "!0s, r!1d", 2),
809 ENCODING_MAP(kThumb2Fmrrd, 0xec500b10,
810 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtDfp, 5, 0,
811 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2,
812 "fmrrd", "r!0d, r!1d, !2S", 2),
813 ENCODING_MAP(kThumb2Fmdrr, 0xec400b10,
814 kFmtDfp, 5, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
815 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
816 "fmdrr", "!0S, r!1d, r!2d", 2),
817 ENCODING_MAP(kThumb2Vabsd, 0xeeb00bc0,
818 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
819 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
820 "vabs.f64", "!0S, !1S", 2),
821 ENCODING_MAP(kThumb2Vabss, 0xeeb00ac0,
822 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
823 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
824 "vabs.f32", "!0s, !1s", 2),
825 ENCODING_MAP(kThumb2Vnegd, 0xeeb10b40,
826 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
827 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
828 "vneg.f64", "!0S, !1S", 2),
829 ENCODING_MAP(kThumb2Vnegs, 0xeeb10a40,
830 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
831 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
832 "vneg.f32", "!0s, !1s", 2),
833 ENCODING_MAP(kThumb2Vmovs_IMM8, 0xeeb00a00,
834 kFmtSfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
835 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
836 "vmov.f32", "!0s, #0x!1h", 2),
837 ENCODING_MAP(kThumb2Vmovd_IMM8, 0xeeb00b00,
838 kFmtDfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
839 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
840 "vmov.f64", "!0S, #0x!1h", 2),
841 ENCODING_MAP(kThumb2Mla, 0xfb000000,
842 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
844 IS_QUAD_OP | REG_DEF0 | REG_USE1 | REG_USE2 | REG_USE3,
845 "mla", "r!0d, r!1d, r!2d, r!3d", 2),
846 ENCODING_MAP(kThumb2Umull, 0xfba00000,
847 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
849 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
850 "umull", "r!0d, r!1d, r!2d, r!3d", 2),
851 ENCODING_MAP(kThumb2Ldrex, 0xe8500f00,
852 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
853 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
854 "ldrex", "r!0d,[r!1d, #!2E]", 2),
855 ENCODING_MAP(kThumb2Strex, 0xe8400000,
856 kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
857 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
858 "strex", "r!0d,r!1d, [r!2d, #!2E]", 2),
859 ENCODING_MAP(kThumb2Clrex, 0xf3bf8f2f,
860 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
861 kFmtUnused, -1, -1, NO_OPERAND,
863 ENCODING_MAP(kThumb2Bfi, 0xf3600000,
864 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtShift5, -1, -1,
865 kFmtBitBlt, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
866 "bfi", "r!0d,r!1d,#!2d,#!3d", 2),
867 ENCODING_MAP(kThumb2Bfc, 0xf36f0000,
868 kFmtBitBlt, 11, 8, kFmtShift5, -1, -1, kFmtBitBlt, 4, 0,
869 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
870 "bfc", "r!0d,#!1d,#!2d", 2),
874 * The fake NOP of moving r0 to r0 actually will incur data stalls if r0 is
875 * not ready. Since r5 (rFP) is not updated often, it is less likely to
876 * generate unnecessary stall cycles.
878 #define PADDING_MOV_R5_R5 0x1C2D
880 /* Write the numbers in the literal pool to the codegen stream */
881 static void installDataContent(CompilationUnit *cUnit)
883 int *dataPtr = (int *) ((char *) cUnit->baseAddr + cUnit->dataOffset);
884 ArmLIR *dataLIR = (ArmLIR *) cUnit->wordList;
886 *dataPtr++ = dataLIR->operands[0];
887 dataLIR = NEXT_LIR(dataLIR);
891 /* Returns the size of a Jit trace description */
892 static int jitTraceDescriptionSize(const JitTraceDescription *desc)
895 for (runCount = 0; ; runCount++) {
896 if (desc->trace[runCount].frag.runEnd)
899 return sizeof(JitCodeDesc) + ((runCount+1) * sizeof(JitTraceRun));
902 /* Return TRUE if error happens */
/*
 * Second pass of assembly: resolve PC-relative operands (literal loads,
 * cbz/cbnz, conditional/unconditional branches, and split blx pairs) now
 * that every LIR's offset is known, then encode each LIR against its
 * ArmEncodingMap entry and emit the raw halfwords into cUnit->codeBuffer.
 * Returns true on failure (a branch/literal distance that does not fit),
 * so the caller can retry with a shorter trace.
 *
 * NOTE(review): this listing has interior lines elided (declarations,
 * several closing braces, and some error paths are not visible); verify
 * against the canonical source before editing.
 */
903 static bool assembleInstructions(CompilationUnit *cUnit, intptr_t startAddr)
905 short *bufferAddr = (short *) cUnit->codeBuffer;
/* Pseudo opcodes (opCode < 0) emit nothing except alignment padding */
908 for (lir = (ArmLIR *) cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
909 if (lir->opCode < 0) {
910 if ((lir->opCode == kArmPseudoPseudoAlign4) &&
911 /* 1 means padding is needed */
912 (lir->operands[0] == 1)) {
913 *bufferAddr++ = PADDING_MOV_R5_R5;
/* PC-relative literal loads: Thumb PC reads as (offset + 4) & ~3 */
922 if (lir->opCode == kThumbLdrPcRel ||
923 lir->opCode == kThumb2LdrPcRel12 ||
924 lir->opCode == kThumbAddPcRel ||
925 ((lir->opCode == kThumb2Vldrs) && (lir->operands[1] == rpc))) {
926 ArmLIR *lirTarget = (ArmLIR *) lir->generic.target;
927 intptr_t pc = (lir->generic.offset + 4) & ~3;
929 * Allow an offset (stored in operands[2]) to be added to the
930 * PC-relative target. Useful to get to a fixed field inside a
933 intptr_t target = lirTarget->generic.offset + lir->operands[2];
934 int delta = target - pc;
936 LOGE("PC-rel distance is not multiples of 4: %d\n", delta);
937 dvmCompilerAbort(cUnit);
/* Range limits: 4091 for the Thumb2 12-bit form, 1020 for Thumb1 */
939 if ((lir->opCode == kThumb2LdrPcRel12) && (delta > 4091)) {
941 } else if (delta > 1020) {
944 if (lir->opCode == kThumb2Vldrs) {
945 lir->operands[2] = delta >> 2;
947 lir->operands[1] = (lir->opCode == kThumb2LdrPcRel12) ?
/* cbz/cbnz: forward-only, max 126 bytes */
950 } else if (lir->opCode == kThumb2Cbnz || lir->opCode == kThumb2Cbz) {
951 ArmLIR *targetLIR = (ArmLIR *) lir->generic.target;
952 intptr_t pc = lir->generic.offset + 4;
953 intptr_t target = targetLIR->generic.offset;
954 int delta = target - pc;
955 if (delta > 126 || delta < 0) {
957 * TODO: allow multiple kinds of assembler failure to allow
958 * change of code patterns when things don't fit.
962 lir->operands[1] = delta >> 1;
/* Conditional branches: Thumb1 form limited to [-256, 254] bytes */
964 } else if (lir->opCode == kThumbBCond ||
965 lir->opCode == kThumb2BCond) {
966 ArmLIR *targetLIR = (ArmLIR *) lir->generic.target;
967 intptr_t pc = lir->generic.offset + 4;
968 intptr_t target = targetLIR->generic.offset;
969 int delta = target - pc;
970 if ((lir->opCode == kThumbBCond) && (delta > 254 || delta < -256)) {
973 lir->operands[0] = delta >> 1;
/* Unconditional Thumb branch: [-2048, 2046] bytes */
974 } else if (lir->opCode == kThumbBUncond) {
975 ArmLIR *targetLIR = (ArmLIR *) lir->generic.target;
976 intptr_t pc = lir->generic.offset + 4;
977 intptr_t target = targetLIR->generic.offset;
978 int delta = target - pc;
979 if (delta > 2046 || delta < -2048) {
980 LOGE("Unconditional branch distance out of range: %d\n", delta);
981 dvmCompilerAbort(cUnit);
983 lir->operands[0] = delta >> 1;
/* blx pair: 22-bit offset split as 11 high bits here, 11 low in NEXT_LIR */
984 } else if (lir->opCode == kThumbBlx1) {
985 assert(NEXT_LIR(lir)->opCode == kThumbBlx2);
987 intptr_t curPC = (startAddr + lir->generic.offset + 4) & ~3;
988 intptr_t target = lir->operands[1];
990 /* Match bit[1] in target with base */
994 int delta = target - curPC;
995 assert((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
997 lir->operands[0] = (delta >> 12) & 0x7ff;
998 NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
/* Generic path: merge each operand into the opcode's bit skeleton */
1001 ArmEncodingMap *encoder = &EncodingMap[lir->opCode];
1002 u4 bits = encoder->skeleton;
1004 for (i = 0; i < 4; i++) {
1007 operand = lir->operands[i];
1008 switch(encoder->fieldLoc[i].kind) {
1012 value = ((operand & 0xF0) >> 4) << encoder->fieldLoc[i].end;
1013 value |= (operand & 0x0F) << encoder->fieldLoc[i].start;
1018 * NOTE: branch offsets are not handled here, but
1019 * in the main assembly loop (where label values
1020 * are known). For reference, here is what the
1021 * encoder handling would be:
1022 value = ((operand & 0x80000) >> 19) << 26;
1023 value |= ((operand & 0x40000) >> 18) << 11;
1024 value |= ((operand & 0x20000) >> 17) << 13;
1025 value |= ((operand & 0x1f800) >> 11) << 16;
1026 value |= (operand & 0x007ff);
1031 value = ((operand & 0x1c) >> 2) << 12;
1032 value |= (operand & 0x03) << 6;
1036 value = ((operand & 0x70) >> 4) << 12;
1037 value |= (operand & 0x0f) << 4;
1041 value = operand - 1;
1045 value = ((operand & 0x1c) >> 2) << 12;
1046 value |= (operand & 0x03) << 6;
1050 value = ((operand & 0x20) >> 5) << 9;
1051 value |= (operand & 0x1f) << 3;
/* Simple contiguous bitfield: shift into place, mask to width */
1055 value = (operand << encoder->fieldLoc[i].start) &
1056 ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
/* Double-precision VFP reg: split D-reg number into 1+4 bit slices */
1060 assert(DOUBLEREG(operand));
1061 assert((operand & 0x1) == 0);
1062 int regName = (operand & FP_REG_MASK) >> 1;
1063 /* Snag the 1-bit slice and position it */
1064 value = ((regName & 0x10) >> 4) <<
1065 encoder->fieldLoc[i].end;
1066 /* Extract and position the 4-bit slice */
1067 value |= (regName & 0x0f) <<
1068 encoder->fieldLoc[i].start;
/* Single-precision VFP reg: low bit and upper 4 bits placed separately */
1073 assert(SINGLEREG(operand));
1074 /* Snag the 1-bit slice and position it */
1075 value = (operand & 0x1) <<
1076 encoder->fieldLoc[i].end;
1077 /* Extract and position the 4-bit slice */
1078 value |= ((operand & 0x1e) >> 1) <<
1079 encoder->fieldLoc[i].start;
/* Thumb2 12-bit immediate: i:imm3:imm8 split */
1084 value = ((operand & 0x800) >> 11) << 26;
1085 value |= ((operand & 0x700) >> 8) << 12;
1086 value |= operand & 0x0ff;
/* Thumb2 16-bit immediate: i:imm4:imm3:imm8 split */
1090 value = ((operand & 0x0800) >> 11) << 26;
1091 value |= ((operand & 0xf000) >> 12) << 16;
1092 value |= ((operand & 0x0700) >> 8) << 12;
1093 value |= operand & 0x0ff;
/* Emit: 32-bit encodings as two halfwords (high first), else one */
1100 if (encoder->size == 2) {
1101 *bufferAddr++ = (bits >> 16) & 0xffff;
1103 *bufferAddr++ = bits & 0xffff;
1108 #if defined(SIGNATURE_BREAKPOINT)
1109 /* Inspect the assembled instruction stream to find potential matches */
/*
 * Inspect the assembled instruction stream (cUnit->codeBuffer, `size`
 * words) for a run of words equal to gDvmJit.signatureBreakpoint[].
 * On a full match, log the word offset and enqueue a heap copy of the
 * trace description as a kWorkOrderTraceDebug work order.
 *
 * NOTE(review): interior lines (i/j/size declarations, closing braces,
 * the inner-loop break) are elided in this listing.
 */
1110 static void matchSignatureBreakpoint(const CompilationUnit *cUnit,
1114 u4 *ptr = (u4 *) cUnit->codeBuffer;
/* Slide a window of signatureBreakpointSize words over the buffer */
1116 for (i = 0; i < size - gDvmJit.signatureBreakpointSize + 1; i++) {
1117 if (ptr[i] == gDvmJit.signatureBreakpoint[0]) {
1118 for (j = 1; j < gDvmJit.signatureBreakpointSize; j++) {
1119 if (ptr[i+j] != gDvmJit.signatureBreakpoint[j]) {
/* j reached the end only if every word matched */
1123 if (j == gDvmJit.signatureBreakpointSize) {
1124 LOGD("Signature match starting from offset %#x (%d words)",
1125 i*4, gDvmJit.signatureBreakpointSize);
1126 int descSize = jitTraceDescriptionSize(cUnit->traceDesc);
/* Copy the trace description: the work order outlives this cUnit.
 * NOTE(review): malloc result is used unchecked here. */
1127 JitTraceDescription *newCopy =
1128 (JitTraceDescription *) malloc(descSize);
1129 memcpy(newCopy, cUnit->traceDesc, descSize);
1130 dvmCompilerWorkEnqueue(NULL, kWorkOrderTraceDebug, newCopy);
1139 * Translation layout in the code cache. Note that the codeAddress pointer
1140 * in JitTable will point directly to the code body (field codeAddress). The
1141 * chain cell offset codeAddress - 2, and (if present) executionCount is at
1144 * +----------------------------+
1145 * | Execution count | -> [Optional] 4 bytes
1146 * +----------------------------+
1147 * +--| Offset to chain cell counts| -> 2 bytes
1148 * | +----------------------------+
1149 * | | Code body | -> Start address for translation
1150 * | | | variable in 2-byte chunks
1151 * | . . (JitTable's codeAddress points here)
1154 * | +----------------------------+
1155 * | | Chaining Cells | -> 8 bytes each, must be 4 byte aligned
1159 * | +----------------------------+
1160 * | | Gap for large switch stmt | -> # cases >= MAX_CHAINED_SWITCH_CASES
1161 * | +----------------------------+
1162 * +->| Chaining cell counts | -> 8 bytes, chain cell counts by type
1163 * +----------------------------+
1164 * | Trace description | -> variable sized
1167 * +----------------------------+
1168 * | Literal pool | -> 4-byte aligned, variable size
1172 * +----------------------------+
1174 * Go over each instruction in the list and calculate the offset from the top
1175 * before sending them off to the assembler. If out-of-range branch distance is
1176 * seen rearrange the instructions a bit to correct it.
/*
 * Drive final assembly of a trace: assign offsets to every LIR, patch
 * the chain-cell-offset word, reserve space for chain cell counts /
 * trace description / literal pool, run assembleInstructions() into a
 * scratch buffer, then install code + metadata into the code cache and
 * flush the I/D caches. On failure (cache full, allocation failure, or
 * assembler range failure) cUnit->baseAddr is left NULL.
 *
 * NOTE(review): interior lines (several declarations, else-branches, and
 * closing braces) are elided in this listing; verify against the
 * canonical source before editing.
 */
1178 void dvmCompilerAssembleLIR(CompilationUnit *cUnit, JitTranslationInfo *info)
1184 ChainCellCounts chainCellCounts;
1185 int descSize = jitTraceDescriptionSize(cUnit->traceDesc);
1186 int chainingCellGap;
1188 info->instructionSet = cUnit->instructionSet;
1190 /* Beginning offset needs to allow space for chain cell offset */
1191 for (armLIR = (ArmLIR *) cUnit->firstLIRInsn;
1193 armLIR = NEXT_LIR(armLIR)) {
1194 armLIR->generic.offset = offset;
/* Real, live instructions consume size halfwords * 2 bytes */
1195 if (armLIR->opCode >= 0 && !armLIR->isNop) {
1196 armLIR->size = EncodingMap[armLIR->opCode].size * 2;
1197 offset += armLIR->size;
1198 } else if (armLIR->opCode == kArmPseudoPseudoAlign4) {
/* operands[0] records whether a padding halfword must be emitted */
1201 armLIR->operands[0] = 1;
1203 armLIR->operands[0] = 0;
1206 /* Pseudo opcodes don't consume space */
1209 /* Const values have to be word aligned */
1210 offset = (offset + 3) & ~3;
1213 * Get the gap (# of u4) between the offset of chaining cell count and
1214 * the bottom of real chaining cells. If the translation has chaining
1215 * cells, the gap is guaranteed to be multiples of 4.
1217 chainingCellGap = (offset - cUnit->chainingCellBottom->offset) >> 2;
1219 /* Add space for chain cell counts & trace description */
1220 u4 chainCellOffset = offset;
1221 ArmLIR *chainCellOffsetLIR = (ArmLIR *) cUnit->chainCellOffsetLIR;
1222 assert(chainCellOffsetLIR);
1223 assert(chainCellOffset < 0x10000);
1224 assert(chainCellOffsetLIR->opCode == kArm16BitData &&
1225 chainCellOffsetLIR->operands[0] == CHAIN_CELL_OFFSET_TAG);
1228 * Replace the CHAIN_CELL_OFFSET_TAG with the real value. If trace
1229 * profiling is enabled, subtract 4 (occupied by the counter word) from
1230 * the absolute offset as the value stored in chainCellOffsetLIR is the
1231 * delta from &chainCellOffsetLIR to &ChainCellCounts.
1233 chainCellOffsetLIR->operands[0] =
1234 gDvmJit.profile ? (chainCellOffset - 4) : chainCellOffset;
1236 offset += sizeof(chainCellCounts) + descSize;
1238 assert((offset & 0x3) == 0); /* Should still be word aligned */
1240 /* Set up offsets for literals */
1241 cUnit->dataOffset = offset;
1243 for (lir = cUnit->wordList; lir; lir = lir->next) {
1244 lir->offset = offset;
1248 cUnit->totalSize = offset;
/* Bail early if the code cache cannot hold this translation */
1250 if (gDvmJit.codeCacheByteUsed + cUnit->totalSize > gDvmJit.codeCacheSize) {
1251 gDvmJit.codeCacheFull = true;
1252 cUnit->baseAddr = NULL;
1256 /* Allocate enough space for the code block */
1257 cUnit->codeBuffer = dvmCompilerNew(chainCellOffset, true);
1258 if (cUnit->codeBuffer == NULL) {
1259 LOGE("Code buffer allocation failure\n");
1260 cUnit->baseAddr = NULL;
/* startAddr = where this code will eventually live in the cache */
1264 bool assemblerFailure = assembleInstructions(
1265 cUnit, (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed);
1268 * Currently the only reason that can cause the assembler to fail is due to
1269 * trace length - cut it in half and retry.
1271 if (assemblerFailure) {
1272 cUnit->halveInstCount = true;
1276 #if defined(SIGNATURE_BREAKPOINT)
1277 if (info->discardResult == false && gDvmJit.signatureBreakpoint != NULL &&
1278 chainCellOffset/4 >= gDvmJit.signatureBreakpointSize) {
1279 matchSignatureBreakpoint(cUnit, chainCellOffset/4);
1283 /* Don't go all the way if the goal is just to get the verbose output */
1284 if (info->discardResult) return;
1286 cUnit->baseAddr = (char *) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed;
1287 gDvmJit.codeCacheByteUsed += offset;
1289 /* Install the code block */
1290 memcpy((char*)cUnit->baseAddr, cUnit->codeBuffer, chainCellOffset);
1291 gDvmJit.numCompilations++;
1293 /* Install the chaining cell counts */
1294 for (i=0; i< kChainingCellGap; i++) {
1295 chainCellCounts.u.count[i] = cUnit->numChainingCells[i];
1298 /* Set the gap number in the chaining cell count structure */
1299 chainCellCounts.u.count[kChainingCellGap] = chainingCellGap;
1301 memcpy((char*)cUnit->baseAddr + chainCellOffset, &chainCellCounts,
1302 sizeof(chainCellCounts));
1304 /* Install the trace description */
1305 memcpy((char*)cUnit->baseAddr + chainCellOffset + sizeof(chainCellCounts),
1306 cUnit->traceDesc, descSize);
1308 /* Write the literals directly into the code cache */
1309 installDataContent(cUnit);
1311 /* Flush dcache and invalidate the icache to maintain coherence */
1312 cacheflush((long)cUnit->baseAddr,
1313 (long)((char *) cUnit->baseAddr + offset), 0);
1315 /* Record code entry point and instruction set */
1316 info->codeAddress = (char*)cUnit->baseAddr + cUnit->headerSize;
1317 /* If applicable, mark low bit to denote thumb */
1318 if (info->instructionSet != DALVIK_JIT_ARM)
1319 info->codeAddress = (char*)info->codeAddress + 1;
1323 * Returns the skeleton bit pattern associated with an opcode. All
1324 * variable fields are zeroed.
1326 static u4 getSkeleton(ArmOpCode op)
1328 return EncodingMap[op].skeleton;
1331 static u4 assembleChainingBranch(int branchOffset, bool thumbTarget)
1336 thumb1 = (getSkeleton(kThumbBlx1) | ((branchOffset>>12) & 0x7ff));
1337 thumb2 = (getSkeleton(kThumbBlx2) | ((branchOffset>> 1) & 0x7ff));
1338 } else if ((branchOffset < -2048) | (branchOffset > 2046)) {
1339 thumb1 = (getSkeleton(kThumbBl1) | ((branchOffset>>12) & 0x7ff));
1340 thumb2 = (getSkeleton(kThumbBl2) | ((branchOffset>> 1) & 0x7ff));
1342 thumb1 = (getSkeleton(kThumbBUncond) | ((branchOffset>> 1) & 0x7ff));
1343 thumb2 = getSkeleton(kThumbOrr); /* nop -> or r0, r0 */
1346 return thumb2<<16 | thumb1;
1350 * Perform translation chain operation.
1351 * For ARM, we'll use a pair of thumb instructions to generate
1352 * an unconditional chaining branch of up to 4MB in distance.
1353 * Use a BL, because the generic "interpret" translation needs
1354 * the link register to find the dalvik pc of the target.
1356 * Where HH is 10 for the 1st inst, and 11 for the second and
1357 * the "o" field is each instruction's 11-bit contribution to the
1358 * 22-bit branch offset.
1359 * If the target is nearby, use a single-instruction bl.
1360 * If one or more threads is suspended, don't chain.
/*
 * Patch the chaining branch at branchAddr so it jumps directly to
 * tgtAddr, skipping the interpreter round trip. Chaining is skipped when
 * profiling is off, a thread-suspend is pending, or the code cache is
 * full. Flushes the patched word from the caches before returning.
 *
 * NOTE(review): the declarations of newInst/thumbTarget and the return
 * statement(s) are elided in this listing.
 */
1362 void* dvmJitChain(void* tgtAddr, u4* branchAddr)
/* Thumb PC reads as the instruction address + 4 */
1364 int baseAddr = (u4) branchAddr + 4;
1365 int branchOffset = (int) tgtAddr - baseAddr;
1370 * Only chain translations when there is no urge to ask all threads to
1371 * suspend themselves via the interpreter.
1373 if ((gDvmJit.pProfTable != NULL) && (gDvm.sumThreadSuspendCount == 0) &&
1374 (gDvmJit.codeCacheFull == false)) {
/* bl/blx pair can span at most +/- 4MB */
1375 assert((branchOffset >= -(1<<22)) && (branchOffset <= ((1<<22)-2)));
1377 gDvmJit.translationChains++;
1379 COMPILER_TRACE_CHAINING(
1380 LOGD("Jit Runtime: chaining 0x%x to 0x%x\n",
1381 (int) branchAddr, (int) tgtAddr & -2));
1384 * NOTE: normally, all translations are Thumb[2] mode, with
1385 * a single exception: the default TEMPLATE_INTERPRET
1386 * pseudo-translation. If the need ever arises to
1387 * mix Arm & Thumb[2] translations, the following code should be
1390 thumbTarget = (tgtAddr != gDvmJit.interpretTemplate);
1392 newInst = assembleChainingBranch(branchOffset, thumbTarget);
/* Write the new instruction pair and sync the I/D caches */
1394 *branchAddr = newInst;
1395 cacheflush((long)branchAddr, (long)branchAddr + 4, 0);
1396 gDvmJit.hasNewChain = true;
1403 * Attempt to enqueue a work order to patch an inline cache for a predicted
1404 * chaining cell for virtual/interface calls.
/*
 * Attempt to install (fast path) or queue (slow path) an inline-cache
 * update for a predicted chaining cell. The fast path writes the cell
 * directly when it is still in its uninitialized state; otherwise the
 * request is queued for the next GC safe point, when all mutators are
 * stopped. Serialized by compilerICPatchLock.
 *
 * NOTE(review): the return statement(s) are elided in this listing;
 * presumably the function returns whether the patch was accepted (fast
 * path or queued) vs. dropped — confirm against the canonical source.
 */
1406 static bool inlineCachePatchEnqueue(PredictedChainingCell *cellAddr,
1407 PredictedChainingCell *newContent)
1412 * Make sure only one thread gets here since updating the cell (ie the fast
1413 * path) and queueing the request (ie the queued path) have to be done
1414 * in an atomic fashion.
1416 dvmLockMutex(&gDvmJit.compilerICPatchLock);
1418 /* Fast path for uninitialized chaining cell */
1419 if (cellAddr->clazz == NULL &&
1420 cellAddr->branch == PREDICTED_CHAIN_BX_PAIR_INIT) {
1421 cellAddr->method = newContent->method;
1422 cellAddr->branch = newContent->branch;
1423 cellAddr->counter = newContent->counter;
1425 * The update order matters - make sure clazz is updated last since it
1426 * will bring the uninitialized chaining cell to life.
1429 cellAddr->clazz = newContent->clazz;
1430 cacheflush((intptr_t) cellAddr, (intptr_t) (cellAddr+1), 0);
1431 #if defined(WITH_JIT_TUNING)
1432 gDvmJit.icPatchFast++;
1436 * Otherwise the patch request will be queued and handled in the next
1437 * GC cycle. At that time all other mutator threads are suspended so
1438 * there will be no partial update in the inline cache state.
1440 else if (gDvmJit.compilerICPatchIndex < COMPILER_IC_PATCH_QUEUE_SIZE) {
1441 int index = gDvmJit.compilerICPatchIndex++;
1442 gDvmJit.compilerICPatchQueue[index].cellAddr = cellAddr;
1443 gDvmJit.compilerICPatchQueue[index].cellContent = *newContent;
1444 #if defined(WITH_JIT_TUNING)
1445 gDvmJit.icPatchQueued++;
1448 /* Queue is full - just drop this patch request */
1451 #if defined(WITH_JIT_TUNING)
1452 gDvmJit.icPatchDropped++;
1456 dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
1461 * This method is called from the invoke templates for virtual and interface
1462 * methods to speculatively setup a chain to the callee. The templates are
1463 * written in assembly and have setup method, cell, and clazz at r0, r2, and
1464 * r3 respectively, so there is a unused argument in the list. Upon return one
1465 * of the following three results may happen:
1466 * 1) Chain is not setup because the callee is native. Reset the rechain
1467 * count to a big number so that it will take a long time before the next
1468 * rechain attempt to happen.
1469 * 2) Chain is not setup because the callee has not been created yet. Reset
1470 * the rechain count to a small number and retry in the near future.
1471 * 3) Ask all other threads to stop before patching this chaining cell.
1472 * This is required because another thread may have passed the class check
1473 * but hasn't reached the chaining cell yet to follow the chain. If we
1474 * patch the content before halting the other thread, there could be a
1475 * small window for race conditions to happen that it may follow the new
1476 * but wrong chain to invoke a different method.
/*
 * Called from the virtual/interface invoke templates to speculatively
 * set up a predicted chaining cell for `method` of class `clazz` (see
 * the block comment above for the three possible outcomes). Under
 * WITH_SELF_VERIFICATION chaining is disabled outright.
 *
 * NOTE(review): the return statements and one parameter line are elided
 * in this listing; the visible paths all end by returning `method`
 * (presumably) — confirm against the canonical source.
 */
1478 const Method *dvmJitToPatchPredictedChain(const Method *method,
1480 PredictedChainingCell *cell,
1481 const ClassObject *clazz)
1483 #if defined(WITH_SELF_VERIFICATION)
1484 /* Disable chaining and prevent this from triggering again for a while */
1485 cell->counter = PREDICTED_CHAIN_COUNTER_AVOID;
1486 cacheflush((long) cell, (long) (cell+1), 0);
1489 /* Don't come back here for a long time if the method is native */
1490 if (dvmIsNativeMethod(method)) {
1491 cell->counter = PREDICTED_CHAIN_COUNTER_AVOID;
1492 cacheflush((long) cell, (long) (cell+1), 0);
1493 COMPILER_TRACE_CHAINING(
1494 LOGD("Jit Runtime: predicted chain %p to native method %s ignored",
1495 cell, method->name));
/* Look up the compiled entry point for the callee */
1498 int tgtAddr = (int) dvmJitGetCodeAddr(method->insns);
1501 * Compilation not made yet for the callee. Reset the counter to a small
1502 * value and come back to check soon.
1504 if ((tgtAddr == 0) || ((void*)tgtAddr == gDvmJit.interpretTemplate)) {
1506 * Wait for a few invocations (currently set to be 16) before trying
1507 * to setup the chain again.
1509 cell->counter = PREDICTED_CHAIN_COUNTER_DELAY;
1510 cacheflush((long) cell, (long) (cell+1), 0);
1511 COMPILER_TRACE_CHAINING(
1512 LOGD("Jit Runtime: predicted chain %p to method %s%s delayed",
1513 cell, method->clazz->descriptor, method->name));
1517 PredictedChainingCell newCell;
1519 /* Avoid back-to-back orders to the same cell */
1520 cell->counter = PREDICTED_CHAIN_COUNTER_AVOID;
1522 int baseAddr = (int) cell + 4; // PC is cur_addr + 4
1523 int branchOffset = tgtAddr - baseAddr;
/* Build the replacement cell contents; always a Thumb target here */
1525 newCell.branch = assembleChainingBranch(branchOffset, true);
1526 newCell.clazz = clazz;
1527 newCell.method = method;
1528 newCell.counter = PREDICTED_CHAIN_COUNTER_RECHAIN;
1531 * Enter the work order to the queue and the chaining cell will be patched
1532 * the next time a safe point is entered.
1534 * If the enqueuing fails reset the rechain count to a normal value so that
1535 * it won't get indefinitely delayed.
1537 if (!inlineCachePatchEnqueue(cell, &newCell)) {
1538 cell->counter = PREDICTED_CHAIN_COUNTER_RECHAIN;
1546 * Patch the inline cache content based on the content passed from the work
/*
 * Drain the queued inline-cache patch requests (filled by
 * inlineCachePatchEnqueue). Runs while all mutator threads are stopped,
 * so cells can be overwritten wholesale; tracks the min/max patched
 * addresses so one cacheflush covers every modified cell.
 *
 * NOTE(review): the declaration of `i` and several closing braces are
 * elided in this listing.
 */
1549 void dvmCompilerPatchInlineCache(void)
1552 PredictedChainingCell *minAddr, *maxAddr;
1554 /* Nothing to be done */
1555 if (gDvmJit.compilerICPatchIndex == 0) return;
1558 * Since all threads are already stopped we don't really need to acquire
1559 * the lock. But race condition can be easily introduced in the future w/o
1560 * paying attention so we still acquire the lock here.
1562 dvmLockMutex(&gDvmJit.compilerICPatchLock);
1564 //LOGD("Number of IC patch work orders: %d", gDvmJit.compilerICPatchIndex);
1566 /* Initialize the min/max address range */
/* Start with an empty range: min at cache end, max at cache start */
1567 minAddr = (PredictedChainingCell *)
1568 ((char *) gDvmJit.codeCache + gDvmJit.codeCacheSize);
1569 maxAddr = (PredictedChainingCell *) gDvmJit.codeCache;
1571 for (i = 0; i < gDvmJit.compilerICPatchIndex; i++) {
1572 PredictedChainingCell *cellAddr =
1573 gDvmJit.compilerICPatchQueue[i].cellAddr;
1574 PredictedChainingCell *cellContent =
1575 &gDvmJit.compilerICPatchQueue[i].cellContent;
/* Trace whether this is a first-time init or a re-patch */
1577 if (cellAddr->clazz == NULL) {
1578 COMPILER_TRACE_CHAINING(
1579 LOGD("Jit Runtime: predicted chain %p to %s (%s) initialized",
1581 cellContent->clazz->descriptor,
1582 cellContent->method->name));
1584 COMPILER_TRACE_CHAINING(
1585 LOGD("Jit Runtime: predicted chain %p from %s to %s (%s) "
1588 cellAddr->clazz->descriptor,
1589 cellContent->clazz->descriptor,
1590 cellContent->method->name));
1593 /* Patch the chaining cell */
1594 *cellAddr = *cellContent;
1595 minAddr = (cellAddr < minAddr) ? cellAddr : minAddr;
1596 maxAddr = (cellAddr > maxAddr) ? cellAddr : maxAddr;
1599 /* Then synchronize the I/D cache */
1600 cacheflush((long) minAddr, (long) (maxAddr+1), 0);
1602 gDvmJit.compilerICPatchIndex = 0;
1603 dvmUnlockMutex(&gDvmJit.compilerICPatchLock);
1607 * Unchain a trace given the starting address of the translation
1608 * in the code cache. Refer to the diagram in dvmCompilerAssembleLIR.
1609 * Returns the address following the last cell unchained. Note that
1610 * the incoming codeAddr is a thumb code address, and therefore has
/*
 * Reset every chaining cell of one translation back to its unchained
 * state (a ldr/blx pair through the rGLUE jit-to-interp handlers, or a
 * cleared clazz for predicted cells). codeAddr is a Thumb address (low
 * bit set), hence the "- 3" adjustments below. Returns the address just
 * past the last cell processed.
 *
 * NOTE(review): several declarations (pChainCells, pStart, thumb1/2,
 * newInst, i, j, cellSize, targetOffset), the switch header, break
 * statements, and closing braces are elided in this listing.
 */
1613 u4* dvmJitUnchain(void* codeAddr)
/* Chain-cell-count offset is stored 2 bytes before the code body;
 * -3 compensates for the Thumb bit in codeAddr */
1615 u2* pChainCellOffset = (u2*)((char*)codeAddr - 3);
1616 u2 chainCellOffset = *pChainCellOffset;
1617 ChainCellCounts *pChainCellCounts =
1618 (ChainCellCounts*)((char*)codeAddr + chainCellOffset - 3);
1626 PredictedChainingCell *predChainCell;
1628 /* Get total count of chain cells */
/* Normal cells are 2 words; predicted cells are 4 words */
1629 for (i = 0, cellSize = 0; i < kChainingCellGap; i++) {
1630 if (i != kChainingCellInvokePredicted) {
1631 cellSize += pChainCellCounts->u.count[i] * 2;
1633 cellSize += pChainCellCounts->u.count[i] * 4;
/* No cells at all: nothing to unchain */
1638 return (u4 *) pChainCellCounts;
1640 /* Locate the beginning of the chain cell region */
1641 pStart = pChainCells = ((u4 *) pChainCellCounts) - cellSize -
1642 pChainCellCounts->u.count[kChainingCellGap];
1644 /* The cells are sorted in order - walk through them and reset */
1645 for (i = 0; i < kChainingCellGap; i++) {
1646 int elemSize = 2; /* Most chaining cell has two words */
1647 if (i == kChainingCellInvokePredicted) {
1651 for (j = 0; j < pChainCellCounts->u.count[i]; j++) {
/* Pick the interpreter re-entry handler for this cell type */
1654 case kChainingCellNormal:
1655 targetOffset = offsetof(InterpState,
1656 jitToInterpEntries.dvmJitToInterpNormal);
1658 case kChainingCellHot:
1659 case kChainingCellInvokeSingleton:
1660 targetOffset = offsetof(InterpState,
1661 jitToInterpEntries.dvmJitToInterpTraceSelect);
1663 case kChainingCellInvokePredicted:
1665 predChainCell = (PredictedChainingCell *) pChainCells;
1667 * There could be a race on another mutator thread to use
1668 * this particular predicted cell and the check has passed
1669 * the clazz comparison. So we cannot safely wipe the
1670 * method and branch but it is safe to clear the clazz,
1671 * which serves as the key.
1673 predChainCell->clazz = PREDICTED_CHAIN_CLAZZ_INIT;
1675 #if defined(WITH_SELF_VERIFICATION)
1676 case kChainingCellBackwardBranch:
1677 targetOffset = offsetof(InterpState,
1678 jitToInterpEntries.dvmJitToInterpBackwardBranch);
1680 #elif defined(WITH_JIT_TUNING)
1681 case kChainingCellBackwardBranch:
1682 targetOffset = offsetof(InterpState,
1683 jitToInterpEntries.dvmJitToInterpNormal);
1687 targetOffset = 0; // make gcc happy
1688 LOGE("Unexpected chaining type: %d", i);
1689 dvmAbort(); // dvmAbort OK here - can't safely recover
1691 COMPILER_TRACE_CHAINING(
1692 LOGD("Jit Runtime: unchaining 0x%x", (int)pChainCells));
1694 * Thumb code sequence for a chaining cell is:
1695 * ldr r0, rGLUE, #<word offset>
1698 if (i != kChainingCellInvokePredicted) {
1699 targetOffset = targetOffset >> 2; /* convert to word offset */
/* 0x6800: ldr r0, [rGLUE, #off]; 0x4780: blx r0 */
1700 thumb1 = 0x6800 | (targetOffset << 6) |
1701 (rGLUE << 3) | (r0 << 0);
1702 thumb2 = 0x4780 | (r0 << 3);
1703 newInst = thumb2<<16 | thumb1;
1704 *pChainCells = newInst;
1706 pChainCells += elemSize; /* Advance by a fixed number of words */
1712 /* Unchain all translation in the cache. */
/* Unchain all translation in the cache. */
/*
 * Walks the whole JitTable under tableLock, unchaining every real
 * translation (skipping the interpret-only template), then flushes the
 * [lowAddress, highAddress) range from the caches and clears the chain
 * bookkeeping flags.
 *
 * NOTE(review): the declarations of `i` and `lastAddress` (presumably
 * assigned from dvmJitUnchain's return value) are elided in this
 * listing, as are several closing braces — confirm against the
 * canonical source.
 */
1713 void dvmJitUnchainAll()
1715 u4* lowAddress = NULL;
1716 u4* highAddress = NULL;
1718 if (gDvmJit.pJitEntryTable != NULL) {
1719 COMPILER_TRACE_CHAINING(LOGD("Jit Runtime: unchaining all"));
1720 dvmLockMutex(&gDvmJit.tableLock);
1721 for (i = 0; i < gDvmJit.jitTableSize; i++) {
/* Only entries with a real compiled body need unchaining */
1722 if (gDvmJit.pJitEntryTable[i].dPC &&
1723 gDvmJit.pJitEntryTable[i].codeAddress &&
1724 (gDvmJit.pJitEntryTable[i].codeAddress !=
1725 gDvmJit.interpretTemplate)) {
1728 dvmJitUnchain(gDvmJit.pJitEntryTable[i].codeAddress);
/* Track the span of modified memory for a single cacheflush */
1729 if (lowAddress == NULL ||
1730 (u4*)gDvmJit.pJitEntryTable[i].codeAddress < lowAddress)
1731 lowAddress = lastAddress;
1732 if (lastAddress > highAddress)
1733 highAddress = lastAddress;
1736 cacheflush((long)lowAddress, (long)highAddress, 0);
1737 dvmUnlockMutex(&gDvmJit.tableLock);
1738 gDvmJit.translationChains = 0;
1740 gDvmJit.hasNewChain = false;
1743 typedef struct jitProfileAddrToLine {
1746 } jitProfileAddrToLine;
1749 /* Callback function to track the bytecode offset/line number relationship */
/*
 * Invoked once per (bytecodeOffset, lineNum) mapping in a method's
 * debug info.  'cnxt' is a jitProfileAddrToLine whose bytecodeOffset
 * is the target; every mapping at or below the target overwrites
 * lineNum, so the final value is the closest preceding source line.
 * NOTE(review): the return statement is on a line elided from this
 * chunk -- presumably 'return 0' to continue decoding; confirm.
 */
1750 static int addrToLineCb (void *cnxt, u4 bytecodeOffset, u4 lineNum)
1752 jitProfileAddrToLine *addrToLine = (jitProfileAddrToLine *) cnxt;
1754 /* Best match so far for this offset */
1755 if (addrToLine->bytecodeOffset >= bytecodeOffset) {
1756 addrToLine->lineNum = lineNum;
/*
 * Return the start of the trace blob for a JIT entry: the profiling
 * header precedes the translation body, 6 bytes before codeAddress
 * for ARM and 7 bytes before otherwise (per the expression below).
 * NOTE(review): presumably the extra byte compensates for the Thumb
 * mode bit encoded in codeAddress -- confirm against full source.
 */
1761 char *getTraceBase(const JitEntry *p)
1763 return (char*)p->codeAddress -
1764 (6 + (p->u.info.instructionSet == DALVIK_JIT_ARM ? 0 : 1));
1767 /* Dumps profile info for a single trace */
/*
 * Logs a TRACEPROFILE line for one JIT entry and returns the trace's
 * execution count (0 for NULL/interpret-only entries).  The counter
 * may be cleared after being read (see the reset path below).
 * NOTE(review): parts of this function (parameter list tail, braces,
 * early returns, the free of methodDesc) are on lines elided from
 * this chunk.
 */
1768 static int dumpTraceProfile(JitEntry *p, bool silent, bool reset,
1771 ChainCellCounts* pCellCounts;
1773 u4* pExecutionCount;
1776 JitTraceDescription *desc;
1777 const Method* method;
1779 traceBase = getTraceBase(p);
/* Entry was never compiled */
1781 if (p->codeAddress == NULL) {
1783 LOGD("TRACEPROFILE 0x%08x 0 NULL 0 0", (int)traceBase);
/* Entry punted to the interpreter */
1786 if (p->codeAddress == gDvmJit.interpretTemplate) {
1788 LOGD("TRACEPROFILE 0x%08x 0 INTERPRET_ONLY 0 0", (int)traceBase);
/* The execution counter is the first word of the trace blob */
1792 pExecutionCount = (u4*) (traceBase);
1793 executionCount = *pExecutionCount;
1795 *pExecutionCount =0;
1798 return executionCount;
/* Chaining-cell counts live at a u2 offset stored 4 bytes in */
1800 pCellOffset = (u2*) (traceBase + 4);
1801 pCellCounts = (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
1802 desc = (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
1803 method = desc->method;
/* methodDesc is heap-allocated by dexProtoCopyMethodDescriptor;
 * NOTE(review): its free() is on an elided line -- confirm no leak. */
1804 char *methodDesc = dexProtoCopyMethodDescriptor(&method->prototype);
1805 jitProfileAddrToLine addrToLine = {0, desc->trace[0].frag.startOffset};
1808 * We may end up decoding the debug information for the same method
1809 * multiple times, but the tradeoff is we don't need to allocate extra
1810 * space to store the addr/line mapping. Since this is a debugging feature
1811 * and done infrequently so the slower but simpler mechanism should work
1814 dexDecodeDebugInfo(method->clazz->pDvmDex->pDexFile,
1815 dvmGetMethodCode(method),
1816 method->clazz->descriptor,
1817 method->prototype.protoIdx,
1818 method->accessFlags,
1819 addrToLineCb, NULL, &addrToLine);
1821 LOGD("TRACEPROFILE 0x%08x % 10d %5.2f%% [%#x(+%d), %d] %s%s;%s",
1824 ((float ) executionCount) / sum * 100.0,
1825 desc->trace[0].frag.startOffset,
1826 desc->trace[0].frag.numInsts,
1828 method->clazz->descriptor, method->name, methodDesc);
1831 return executionCount;
1834 /* Create a copy of the trace descriptor of an existing compilation */
1835 JitTraceDescription *dvmCopyTraceDescriptor(const u2 *pc,
1836 const JitEntry *knownEntry)
1838 const JitEntry *jitEntry = knownEntry ? knownEntry : dvmFindJitEntry(pc);
1839 if (jitEntry == NULL) return NULL;
1841 /* Find out the startint point */
1842 char *traceBase = getTraceBase(jitEntry);
1844 /* Then find out the starting point of the chaining cell */
1845 u2 *pCellOffset = (u2*) (traceBase + 4);
1846 ChainCellCounts *pCellCounts =
1847 (ChainCellCounts*) ((char *)pCellOffset + *pCellOffset);
1849 /* From there we can find out the starting point of the trace descriptor */
1850 JitTraceDescription *desc =
1851 (JitTraceDescription*) ((char*)pCellCounts + sizeof(*pCellCounts));
1853 /* Now make a copy and return */
1854 int descSize = jitTraceDescriptionSize(desc);
1855 JitTraceDescription *newCopy = (JitTraceDescription *) malloc(descSize);
1856 memcpy(newCopy, desc, descSize);
1860 /* Handy function to retrieve the profile count */
/*
 * Returns the execution counter stored in the first word of the
 * trace blob for a compiled entry.
 * NOTE(review): the guard's 'return 0' (for unused/uncompiled
 * entries) is on a line elided from this chunk.
 */
1861 static inline int getProfileCount(const JitEntry *entry)
1863 if (entry->dPC == 0 || entry->codeAddress == 0)
1865 u4 *pExecutionCount = (u4 *) getTraceBase(entry);
1867 return *pExecutionCount;
1871 /* qsort callback function */
/*
 * Comparator ordering JitEntry records by profile count, hottest
 * first (descending).
 */
1872 static int sortTraceProfileCount(const void *entry1, const void *entry2)
1874 const JitEntry *jitEntry1 = entry1;
1875 const JitEntry *jitEntry2 = entry2;
1877 int count1 = getProfileCount(jitEntry1);
1878 int count2 = getProfileCount(jitEntry2);
/* Negative when entry1 is hotter => descending count order */
1879 return (count1 == count2) ? 0 : ((count1 > count2) ? -1 : 1);
1882 /* Sort the trace profile counts and dump them */
/*
 * Snapshot the JIT entry table under tableLock, sort the copy by
 * descending execution count, log per-trace profiles plus the average
 * count, re-dump (resetting each counter), and queue the hottest
 * traces for debug work orders.
 * NOTE(review): several lines (declarations of 'i'/'numTraces',
 * early-exit paths, trailing call arguments) are elided from this
 * chunk.
 */
1883 void dvmCompilerSortAndPrintTraceProfiles()
1885 JitEntry *sortedEntries;
1887 unsigned long sum = 0;
1890 /* Make sure that the table is not changing */
1891 dvmLockMutex(&gDvmJit.tableLock);
1893 /* Sort the entries by descending order */
1894 sortedEntries = malloc(sizeof(JitEntry) * gDvmJit.jitTableSize);
1895 if (sortedEntries == NULL)
1897 memcpy(sortedEntries, gDvmJit.pJitEntryTable,
1898 sizeof(JitEntry) * gDvmJit.jitTableSize);
1899 qsort(sortedEntries, gDvmJit.jitTableSize, sizeof(JitEntry),
1900 sortTraceProfileCount);
1902 /* Analyze the sorted entries */
1903 for (i=0; i < gDvmJit.jitTableSize; i++) {
1904 if (sortedEntries[i].dPC != 0) {
1905 sum += dumpTraceProfile(&sortedEntries[i],
1918 LOGD("JIT: Average execution count -> %d",(int)(sum / numTraces));
1920 /* Dump the sorted entries. The count of each trace will be reset to 0. */
1921 for (i=0; i < gDvmJit.jitTableSize; i++) {
1922 if (sortedEntries[i].dPC != 0) {
1923 dumpTraceProfile(&sortedEntries[i],
/* Queue the hottest (up to 10) traces for debug dumping */
1930 for (i=0; i < gDvmJit.jitTableSize && i < 10; i++) {
1931 JitTraceDescription* desc =
1932 dvmCopyTraceDescriptor(NULL, &sortedEntries[i]);
1933 dvmCompilerWorkEnqueue(sortedEntries[i].dPC,
1934 kWorkOrderTraceDebug, desc);
1937 free(sortedEntries);
1939 dvmUnlockMutex(&gDvmJit.tableLock);
1943 #if defined(WITH_SELF_VERIFICATION)
1945 * The following are used to keep compiled loads and stores from modifying
1946 * memory during self verification mode.
1948 * Stores do not modify memory. Instead, the address and value pair are stored
1949 * into heapSpace. Addresses within heapSpace are unique. For accesses smaller
1950 * than a word, the word containing the address is loaded first before being
1953 * Loads check heapSpace first and return data from there if an entry exists.
1954 * Otherwise, data is loaded from memory as usual.
1957 /* Used to specify sizes of memory operations */
1968 /* Load the value of a decoded register from the stack */
/*
 * Reads the saved value of core register 'reg' from the register save
 * area at 'sp' (registers are stored consecutively on the stack; see
 * dvmSelfVerificationMemOpDecode).
 * NOTE(review): the body is on lines elided from this chunk --
 * presumably 'return *(sp + reg);'.
 */
1969 static int selfVerificationMemRegLoad(int* sp, int reg)
1974 /* Load the value of a decoded doubleword register from the stack */
1975 static s8 selfVerificationMemRegLoadDouble(int* sp, int reg)
1977 return *((s8*)(sp + reg));
1980 /* Store the value of a decoded register out to the stack */
/*
 * Writes 'data' into the save slot of core register 'reg' at 'sp'.
 * NOTE(review): the body is on lines elided from this chunk --
 * presumably '*(sp + reg) = data;'.
 */
1981 static void selfVerificationMemRegStore(int* sp, int data, int reg)
1986 /* Store the value of a decoded doubleword register out to the stack */
1987 static void selfVerificationMemRegStoreDouble(int* sp, s8 data, int reg)
1989 *((s8*)(sp + reg)) = data;
1993 * Load the specified size of data from the specified address, checking
1994 * heapSpace first if Self Verification mode wrote to it previously, and
1995 * falling back to actual memory otherwise.
1997 static int selfVerificationLoad(int addr, int size)
1999 Thread *self = dvmThreadSelf();
2000 ShadowSpace *shadowSpace = self->shadowSpace;
2001 ShadowHeap *heapSpacePtr;
/* Shadow heap entries are keyed by the word-aligned address */
2004 int maskedAddr = addr & 0xFFFFFFFC;
2005 int alignment = addr & 0x3;
2007 for (heapSpacePtr = shadowSpace->heapSpace;
2008 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
2009 if (heapSpacePtr->addr == maskedAddr) {
/* Redirect the access into the shadow copy, preserving the
 * sub-word alignment of the original address */
2010 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
/* NOTE(review): the switch(size) header and several case labels /
 * break statements are on lines elided from this chunk. */
2017 data = *((u1*) addr);
2020 data = *((s1*) addr);
2023 data = *((u2*) addr);
2025 case kSVSignedHalfword:
2026 data = *((s2*) addr);
2029 data = *((u4*) addr);
2032 LOGE("*** ERROR: BAD SIZE IN selfVerificationLoad: %d", size);
2037 //LOGD("*** HEAP LOAD: Addr: 0x%x Data: 0x%x Size: %d", addr, data, size);
2041 /* Like selfVerificationLoad, but specifically for doublewords */
/*
 * Reads two words: both are fetched from real memory first, then
 * either word is overridden by a shadow-heap entry if one exists for
 * that exact address.
 * NOTE(review): the declaration of 'addr2' (presumably addr + 4) is
 * on a line elided from this chunk.
 */
2042 static s8 selfVerificationLoadDoubleword(int addr)
2044 Thread *self = dvmThreadSelf();
2045 ShadowSpace* shadowSpace = self->shadowSpace;
2046 ShadowHeap* heapSpacePtr;
/* Optimistically read both halves from actual memory */
2049 unsigned int data = *((unsigned int*) addr);
2050 unsigned int data2 = *((unsigned int*) addr2);
2052 for (heapSpacePtr = shadowSpace->heapSpace;
2053 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
2054 if (heapSpacePtr->addr == addr) {
2055 data = heapSpacePtr->data;
2056 } else if (heapSpacePtr->addr == addr2) {
2057 data2 = heapSpacePtr->data;
2061 //LOGD("*** HEAP LOAD DOUBLEWORD: Addr: 0x%x Data: 0x%x Data2: 0x%x",
2062 // addr, data, data2);
/* Low word comes from 'data', high word from 'data2' */
2063 return (((s8) data2) << 32) | data;
2067 * Handles a store of a specified size of data to a specified address.
2068 * This gets logged as an addr/data pair in heapSpace instead of modifying
2069 * memory. Addresses in heapSpace are unique, and accesses smaller than a
2070 * word pull the entire word from memory first before updating.
2072 static void selfVerificationStore(int addr, int data, int size)
2074 Thread *self = dvmThreadSelf();
2075 ShadowSpace *shadowSpace = self->shadowSpace;
2076 ShadowHeap *heapSpacePtr;
/* Shadow entries are keyed by the word-aligned address */
2078 int maskedAddr = addr & 0xFFFFFFFC;
2079 int alignment = addr & 0x3;
2081 //LOGD("*** HEAP STORE: Addr: 0x%x Data: 0x%x Size: %d", addr, data, size);
2083 for (heapSpacePtr = shadowSpace->heapSpace;
2084 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
2085 if (heapSpacePtr->addr == maskedAddr) break;
/* No entry yet: seed a new one with the real memory contents so
 * sub-word stores merge into the correct surrounding bytes */
2088 if (heapSpacePtr == shadowSpace->heapSpaceTail) {
2089 heapSpacePtr->addr = maskedAddr;
2090 heapSpacePtr->data = *((unsigned int*) maskedAddr);
2091 shadowSpace->heapSpaceTail++;
/* Write through the shadow entry at the original sub-word alignment */
2094 addr = ((unsigned int) &(heapSpacePtr->data)) | alignment;
/* NOTE(review): the switch(size) header and interleaved case labels /
 * breaks are on lines elided from this chunk. */
2097 *((u1*) addr) = data;
2100 *((s1*) addr) = data;
2103 *((u2*) addr) = data;
2105 case kSVSignedHalfword:
2106 *((s2*) addr) = data;
2109 *((u4*) addr) = data;
/* NOTE(review): message says "selfVerificationSave" but this function
 * is selfVerificationStore -- likely a stale copy/paste in the log. */
2112 LOGE("*** ERROR: BAD SIZE IN selfVerificationSave: %d", size);
2117 /* Like selfVerificationStore, but specifically for doublewords */
/*
 * Logs both 32-bit halves of a doubleword store into the shadow heap,
 * updating existing entries in place or appending new ones at the tail.
 * NOTE(review): the declaration of 'addr2' (presumably addr + 4) and
 * the lines that set/test store1/store2 around the loop are elided
 * from this chunk.
 */
2118 static void selfVerificationStoreDoubleword(int addr, s8 double_data)
2120 Thread *self = dvmThreadSelf();
2121 ShadowSpace *shadowSpace = self->shadowSpace;
2122 ShadowHeap *heapSpacePtr;
/* Split the doubleword: low half in 'data', high half in 'data2' */
2125 int data = double_data;
2126 int data2 = double_data >> 32;
2127 bool store1 = false, store2 = false;
2129 //LOGD("*** HEAP STORE DOUBLEWORD: Addr: 0x%x Data: 0x%x, Data2: 0x%x",
2130 // addr, data, data2);
/* Update any halves that already have shadow entries, in place */
2132 for (heapSpacePtr = shadowSpace->heapSpace;
2133 heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
2134 if (heapSpacePtr->addr == addr) {
2135 heapSpacePtr->data = data;
2137 } else if (heapSpacePtr->addr == addr2) {
2138 heapSpacePtr->data = data2;
/* Append entries for halves that were not found above */
2144 shadowSpace->heapSpaceTail->addr = addr;
2145 shadowSpace->heapSpaceTail->data = data;
2146 shadowSpace->heapSpaceTail++;
2149 shadowSpace->heapSpaceTail->addr = addr2;
2150 shadowSpace->heapSpaceTail->data = data2;
2151 shadowSpace->heapSpaceTail++;
2156 * Decodes the memory instruction at the address specified in the link
2157 * register. All registers (r0-r12,lr) and fp registers (d0-d15) are stored
2158 * consecutively on the stack beginning at the specified stack pointer.
2159 * Calls the proper Self Verification handler for the memory instruction and
2160 * updates the link register to point past the decoded memory instruction.
2162 void dvmSelfVerificationMemOpDecode(int lr, int* sp)
2165 kMemOpLdrPcRel = 0x09, // ldr(3) [01001] rd[10..8] imm_8[7..0]
2166 kMemOpRRR = 0x0A, // Full opcode is 7 bits
2167 kMemOp2Single = 0x0A, // Used for Vstrs and Vldrs
2168 kMemOpRRR2 = 0x0B, // Full opcode is 7 bits
2169 kMemOp2Double = 0x0B, // Used for Vstrd and Vldrd
2170 kMemOpStrRRI5 = 0x0C, // str(1) [01100] imm_5[10..6] rn[5..3] rd[2..0]
2171 kMemOpLdrRRI5 = 0x0D, // ldr(1) [01101] imm_5[10..6] rn[5..3] rd[2..0]
2172 kMemOpStrbRRI5 = 0x0E, // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0]
2173 kMemOpLdrbRRI5 = 0x0F, // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0]
2174 kMemOpStrhRRI5 = 0x10, // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0]
2175 kMemOpLdrhRRI5 = 0x11, // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0]
2176 kMemOpLdrSpRel = 0x13, // ldr(4) [10011] rd[10..8] imm_8[7..0]
2177 kMemOpStmia = 0x18, // stmia [11000] rn[10..8] reglist [7..0]
2178 kMemOpLdmia = 0x19, // ldmia [11001] rn[10..8] reglist [7..0]
2179 kMemOpStrRRR = 0x28, // str(2) [0101000] rm[8..6] rn[5..3] rd[2..0]
2180 kMemOpStrhRRR = 0x29, // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0]
2181 kMemOpStrbRRR = 0x2A, // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0]
2182 kMemOpLdrsbRRR = 0x2B, // ldrsb [0101011] rm[8..6] rn[5..3] rd[2..0]
2183 kMemOpLdrRRR = 0x2C, // ldr(2) [0101100] rm[8..6] rn[5..3] rd[2..0]
2184 kMemOpLdrhRRR = 0x2D, // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0]
2185 kMemOpLdrbRRR = 0x2E, // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0]
2186 kMemOpLdrshRRR = 0x2F, // ldrsh [0101111] rm[8..6] rn[5..3] rd[2..0]
2187 kMemOp2Stmia = 0xE88, // stmia [111010001000[ rn[19..16] mask[15..0]
2188 kMemOp2Ldmia = 0xE89, // ldmia [111010001001[ rn[19..16] mask[15..0]
2189 kMemOp2Stmia2 = 0xE8A, // stmia [111010001010[ rn[19..16] mask[15..0]
2190 kMemOp2Ldmia2 = 0xE8B, // ldmia [111010001011[ rn[19..16] mask[15..0]
2191 kMemOp2Vstr = 0xED8, // Used for Vstrs and Vstrd
2192 kMemOp2Vldr = 0xED9, // Used for Vldrs and Vldrd
2193 kMemOp2Vstr2 = 0xEDC, // Used for Vstrs and Vstrd
2194 kMemOp2Vldr2 = 0xEDD, // Used for Vstrs and Vstrd
2195 kMemOp2StrbRRR = 0xF80, /* str rt,[rn,rm,LSL #imm] [111110000000]
2196 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2197 kMemOp2LdrbRRR = 0xF81, /* ldrb rt,[rn,rm,LSL #imm] [111110000001]
2198 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2199 kMemOp2StrhRRR = 0xF82, /* str rt,[rn,rm,LSL #imm] [111110000010]
2200 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2201 kMemOp2LdrhRRR = 0xF83, /* ldrh rt,[rn,rm,LSL #imm] [111110000011]
2202 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2203 kMemOp2StrRRR = 0xF84, /* str rt,[rn,rm,LSL #imm] [111110000100]
2204 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2205 kMemOp2LdrRRR = 0xF85, /* ldr rt,[rn,rm,LSL #imm] [111110000101]
2206 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2207 kMemOp2StrbRRI12 = 0xF88, /* strb rt,[rn,#imm12] [111110001000]
2208 rt[15..12] rn[19..16] imm12[11..0] */
2209 kMemOp2LdrbRRI12 = 0xF89, /* ldrb rt,[rn,#imm12] [111110001001]
2210 rt[15..12] rn[19..16] imm12[11..0] */
2211 kMemOp2StrhRRI12 = 0xF8A, /* strh rt,[rn,#imm12] [111110001010]
2212 rt[15..12] rn[19..16] imm12[11..0] */
2213 kMemOp2LdrhRRI12 = 0xF8B, /* ldrh rt,[rn,#imm12] [111110001011]
2214 rt[15..12] rn[19..16] imm12[11..0] */
2215 kMemOp2StrRRI12 = 0xF8C, /* str(Imm,T3) rd,[rn,#imm12] [111110001100]
2216 rn[19..16] rt[15..12] imm12[11..0] */
2217 kMemOp2LdrRRI12 = 0xF8D, /* ldr(Imm,T3) rd,[rn,#imm12] [111110001101]
2218 rn[19..16] rt[15..12] imm12[11..0] */
2219 kMemOp2LdrsbRRR = 0xF91, /* ldrsb rt,[rn,rm,LSL #imm] [111110010001]
2220 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2221 kMemOp2LdrshRRR = 0xF93, /* ldrsh rt,[rn,rm,LSL #imm] [111110010011]
2222 rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0] */
2223 kMemOp2LdrsbRRI12 = 0xF99, /* ldrsb rt,[rn,#imm12] [111110011001]
2224 rt[15..12] rn[19..16] imm12[11..0] */
2225 kMemOp2LdrshRRI12 = 0xF9B, /* ldrsh rt,[rn,#imm12] [111110011011]
2226 rt[15..12] rn[19..16] imm12[11..0] */
2227 kMemOp2 = 0xE000, // top 3 bits set indicates Thumb2
2230 int addr, offset, data;
2231 long long double_data;
2234 unsigned int *lr_masked = (unsigned int *) (lr & 0xFFFFFFFE);
2235 unsigned int insn = *lr_masked;
2238 old_lr = selfVerificationMemRegLoad(sp, 13);
2240 if ((insn & kMemOp2) == kMemOp2) {
2241 insn = (insn << 16) | (insn >> 16);
2242 //LOGD("*** THUMB2 - Addr: 0x%x Insn: 0x%x", lr, insn);
2244 int opcode12 = (insn >> 20) & 0xFFF;
2245 int opcode6 = (insn >> 6) & 0x3F;
2246 int opcode4 = (insn >> 8) & 0xF;
2247 int imm2 = (insn >> 4) & 0x3;
2248 int imm8 = insn & 0xFF;
2249 int imm12 = insn & 0xFFF;
2250 int rd = (insn >> 12) & 0xF;
2251 int rm = insn & 0xF;
2252 int rn = (insn >> 16) & 0xF;
2253 int rt = (insn >> 12) & 0xF;
2256 // Update the link register
2257 selfVerificationMemRegStore(sp, old_lr+4, 13);
2259 // Determine whether the mem op is a store or load
2265 case kMemOp2StrbRRR:
2266 case kMemOp2StrhRRR:
2268 case kMemOp2StrbRRI12:
2269 case kMemOp2StrhRRI12:
2270 case kMemOp2StrRRI12:
2274 // Determine the size of the mem access
2276 case kMemOp2StrbRRR:
2277 case kMemOp2LdrbRRR:
2278 case kMemOp2StrbRRI12:
2279 case kMemOp2LdrbRRI12:
2282 case kMemOp2LdrsbRRR:
2283 case kMemOp2LdrsbRRI12:
2284 size = kSVSignedByte;
2286 case kMemOp2StrhRRR:
2287 case kMemOp2LdrhRRR:
2288 case kMemOp2StrhRRI12:
2289 case kMemOp2LdrhRRI12:
2292 case kMemOp2LdrshRRR:
2293 case kMemOp2LdrshRRI12:
2294 size = kSVSignedHalfword;
2300 if (opcode4 == kMemOp2Double) size = kSVDoubleword;
2310 // Load the value of the address
2311 addr = selfVerificationMemRegLoad(sp, rn);
2313 // Figure out the offset
2320 if (opcode4 == kMemOp2Single) {
2322 if (insn & 0x400000) rt |= 0x1;
2323 } else if (opcode4 == kMemOp2Double) {
2324 if (insn & 0x400000) rt |= 0x10;
2327 LOGE("*** ERROR: UNRECOGNIZED VECTOR MEM OP: %x", opcode4);
2332 case kMemOp2StrbRRR:
2333 case kMemOp2LdrbRRR:
2334 case kMemOp2StrhRRR:
2335 case kMemOp2LdrhRRR:
2338 case kMemOp2LdrsbRRR:
2339 case kMemOp2LdrshRRR:
2340 offset = selfVerificationMemRegLoad(sp, rm) << imm2;
2342 case kMemOp2StrbRRI12:
2343 case kMemOp2LdrbRRI12:
2344 case kMemOp2StrhRRI12:
2345 case kMemOp2LdrhRRI12:
2346 case kMemOp2StrRRI12:
2347 case kMemOp2LdrRRI12:
2348 case kMemOp2LdrsbRRI12:
2349 case kMemOp2LdrshRRI12:
2360 LOGE("*** ERROR: UNRECOGNIZED THUMB2 MEM OP: %x", opcode12);
2365 // Handle the decoded mem op accordingly
2367 if (size == kSVVariable) {
2368 LOGD("*** THUMB2 STMIA CURRENTLY UNUSED (AND UNTESTED)");
2370 int regList = insn & 0xFFFF;
2371 for (i = 0; i < 16; i++) {
2372 if (regList & 0x1) {
2373 data = selfVerificationMemRegLoad(sp, i);
2374 selfVerificationStore(addr, data, kSVWord);
2377 regList = regList >> 1;
2379 if (wBack) selfVerificationMemRegStore(sp, addr, rn);
2380 } else if (size == kSVDoubleword) {
2381 double_data = selfVerificationMemRegLoadDouble(sp, rt);
2382 selfVerificationStoreDoubleword(addr+offset, double_data);
2384 data = selfVerificationMemRegLoad(sp, rt);
2385 selfVerificationStore(addr+offset, data, size);
2388 if (size == kSVVariable) {
2389 LOGD("*** THUMB2 LDMIA CURRENTLY UNUSED (AND UNTESTED)");
2391 int regList = insn & 0xFFFF;
2392 for (i = 0; i < 16; i++) {
2393 if (regList & 0x1) {
2394 data = selfVerificationLoad(addr, kSVWord);
2395 selfVerificationMemRegStore(sp, data, i);
2398 regList = regList >> 1;
2400 if (wBack) selfVerificationMemRegStore(sp, addr, rn);
2401 } else if (size == kSVDoubleword) {
2402 double_data = selfVerificationLoadDoubleword(addr+offset);
2403 selfVerificationMemRegStoreDouble(sp, double_data, rt);
2405 data = selfVerificationLoad(addr+offset, size);
2406 selfVerificationMemRegStore(sp, data, rt);
2410 //LOGD("*** THUMB - Addr: 0x%x Insn: 0x%x", lr, insn);
2412 // Update the link register
2413 selfVerificationMemRegStore(sp, old_lr+2, 13);
2415 int opcode5 = (insn >> 11) & 0x1F;
2416 int opcode7 = (insn >> 9) & 0x7F;
2417 int imm = (insn >> 6) & 0x1F;
2418 int rd = (insn >> 8) & 0x7;
2419 int rm = (insn >> 6) & 0x7;
2420 int rn = (insn >> 3) & 0x7;
2421 int rt = insn & 0x7;
2423 // Determine whether the mem op is a store or load
2434 case kMemOpStrbRRI5:
2435 case kMemOpStrhRRI5:
2440 // Determine the size of the mem access
2449 case kMemOpLdrsbRRR:
2450 size = kSVSignedByte;
2456 case kMemOpLdrshRRR:
2457 size = kSVSignedHalfword;
2461 case kMemOpStrbRRI5:
2462 case kMemOpLdrbRRI5:
2465 case kMemOpStrhRRI5:
2466 case kMemOpLdrhRRI5:
2475 // Load the value of the address
2476 if (opcode5 == kMemOpLdrPcRel)
2477 addr = selfVerificationMemRegLoad(sp, 4);
2478 else if (opcode5 == kMemOpStmia || opcode5 == kMemOpLdmia)
2479 addr = selfVerificationMemRegLoad(sp, rd);
2481 addr = selfVerificationMemRegLoad(sp, rn);
2483 // Figure out the offset
2485 case kMemOpLdrPcRel:
2486 offset = (insn & 0xFF) << 2;
2491 offset = selfVerificationMemRegLoad(sp, rm);
2497 case kMemOpStrhRRI5:
2498 case kMemOpLdrhRRI5:
2501 case kMemOpStrbRRI5:
2502 case kMemOpLdrbRRI5:
2510 LOGE("*** ERROR: UNRECOGNIZED THUMB MEM OP: %x", opcode5);
2515 // Handle the decoded mem op accordingly
2517 if (size == kSVVariable) {
2519 int regList = insn & 0xFF;
2520 for (i = 0; i < 8; i++) {
2521 if (regList & 0x1) {
2522 data = selfVerificationMemRegLoad(sp, i);
2523 selfVerificationStore(addr, data, kSVWord);
2526 regList = regList >> 1;
2528 selfVerificationMemRegStore(sp, addr, rd);
2530 data = selfVerificationMemRegLoad(sp, rt);
2531 selfVerificationStore(addr+offset, data, size);
2534 if (size == kSVVariable) {
2537 int regList = insn & 0xFF;
2538 for (i = 0; i < 8; i++) {
2539 if (regList & 0x1) {
2540 if (i == rd) wBack = false;
2541 data = selfVerificationLoad(addr, kSVWord);
2542 selfVerificationMemRegStore(sp, data, i);
2545 regList = regList >> 1;
2547 if (wBack) selfVerificationMemRegStore(sp, addr, rd);
2549 data = selfVerificationLoad(addr+offset, size);
2550 selfVerificationMemRegStore(sp, data, rt);