    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
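    /*
     * For orientation, a rough C sketch of what this handler does.  The
     * struct and field names below are illustrative stand-ins for whatever
     * the offArrayObject_* offsets and the GET_VREG macro actually resolve
     * to; only the control flow is meant to match the code:
     *
     *     ArrayObject* arr = (ArrayObject*) fp[BB];   // GET_VREG(r0, r2)
     *     u4 idx = fp[CC];                            // GET_VREG(r1, r3)
     *     if (arr == NULL)
     *         goto common_errNullObject;
     *     if (idx >= arr->length)
     *         goto common_errArrayIndex;
     *     memcpy(&fp[AA], (u1*) arr->contents + idx * 8, 8);  // vAA/vAA+1
     */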
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
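    @ aget-wide uses instruction format 23x (AA|op in the first code unit,
    @ CC|BB in the second), so at this point r9 holds AA while r0/r1 hold
    @ the array reference and index fetched from vBB/vCC.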
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .L${opcode}_finish          @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.
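    @ A sketch of that swapped ordering, inverting the condition so the
    @ out-of-bounds case takes the conditional branch (untested; bcs is
    @ the unsigned >= counterpart of bcc after the cmp above):
    @
    @     bcs     common_errArrayIndex    @ index >= length, bail
    @     b       .L${opcode}_finish      @ okay, continue below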
%break

.L${opcode}_finish:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r0, r0, #offArrayObject_contents   @ r0<- &vBB[vCC]
    ldmia   r0, {r2-r3}                 @ r2/r3<- vBB[vCC]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction
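    @ GET_INST_OPCODE pulls the opcode byte out of the rINST value loaded
    @ by FETCH_ADVANCE_INST above, and GOTO_OPCODE performs the mterp
    @ computed branch into the next handler (assuming the usual ARM mterp
    @ scheme of adding the shifted opcode to the handler-table base, with
    @ each handler padded to a fixed-size stub).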