%def bincmp(condition=""):
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "condition"
     * fragment that specifies the comparison to perform.
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    lsr     w1, wINST, #12              // w1<- B
    ubfx    w0, wINST, #8, #4           // w0<- A
    GET_VREG w3, w1                     // w3<- vB
    GET_VREG w2, w0                     // w2<- vA
    cmp     w2, w3                      // compare (vA, vB)
    b.${condition} 1f
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction
1:
    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
    BRANCH

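/*
 * Example expansion: op_if_eq below instantiates this template with
 * condition="eq", so the test above becomes "b.eq 1f".  When vA != vB the
 * code falls through, advances past the two-code-unit if-cmp and dispatches
 * the next opcode; when vA == vB it reloads wINST with the signed CCCC
 * offset and lets BRANCH apply it.
 */
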
%def zcmp(compare="1", branch=""):
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "branch"
     * fragment with the branch instruction to use; pass compare="0" when that
     * instruction performs its own test and the explicit compare against zero
     * should be skipped.
     *
     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    lsr     w0, wINST, #8               // w0<- AA
    GET_VREG w2, w0                     // w2<- vAA
    .if ${compare}
    cmp     w2, #0                      // compare (vAA, 0)
    .endif
    ${branch} 1f
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction
1:
    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
    BRANCH

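/*
 * Worked examples from the instantiations below: op_if_eqz passes
 * compare="0" and branch="cbz     w2,", so the cmp is omitted and the test
 * becomes "cbz     w2, 1f"; op_if_gtz keeps the default compare="1" and
 * passes branch="b.gt", yielding "cmp w2, #0" followed by "b.gt 1f".
 */
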
%def op_goto():
/*
 * Unconditional branch, 8-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset.
 */
    /* goto +AA */
    sbfx    wINST, wINST, #8, #8           // wINST<- ssssssAA (sign-extended)
    BRANCH

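/*
 * For example, a backward goto with AA = 0xfe sign-extends to -2 code units,
 * i.e. a branch of -4 bytes once the offset is doubled; AA = 0x05 is a
 * forward branch of 10 bytes.
 */
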
%def op_goto_16():
/*
 * Unconditional branch, 16-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset.
 */
    /* goto/16 +AAAA */
    FETCH_S wINST, 1                    // wINST<- ssssAAAA (sign-extended)
    BRANCH

%def op_goto_32():
/*
 * Unconditional branch, 32-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset.
 *
 * The two 16-bit halves are fetched separately and combined into wINST
 * before BRANCH applies the offset.
 */
    /* goto/32 +AAAAAAAA */
    FETCH w0, 1                         // w0<- aaaa (lo)
    FETCH w1, 2                         // w1<- AAAA (hi)
    orr     wINST, w0, w1, lsl #16      // wINST<- AAAAaaaa
    BRANCH

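/*
 * For example, aaaa = 0x0004 with AAAA = 0x0000 combines to an offset of 4
 * code units (8 bytes forward), while aaaa = 0xfffc with AAAA = 0xffff
 * combines to 0xfffffffc, a branch of -4 code units (8 bytes backward).
 */
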
%def op_if_eq():
%  bincmp(condition="eq")

%def op_if_eqz():
%  zcmp(compare="0", branch="cbz     w2,")

%def op_if_ge():
%  bincmp(condition="ge")

%def op_if_gez():
%  zcmp(compare="0", branch="tbz     w2, #31,")

%def op_if_gt():
%  bincmp(condition="gt")

%def op_if_gtz():
%  zcmp(branch="b.gt")

%def op_if_le():
%  bincmp(condition="le")

%def op_if_lez():
%  zcmp(branch="b.le")

%def op_if_lt():
%  bincmp(condition="lt")

%def op_if_ltz():
%  zcmp(compare="0", branch="tbnz    w2, #31,")

%def op_if_ne():
%  bincmp(condition="ne")

%def op_if_nez():
%  zcmp(compare="0", branch="cbnz    w2,")

%def op_packed_switch(func="NterpDoPackedSwitch"):
/*
 * Handle a packed-switch or sparse-switch instruction.  In both cases
 * we decode it and hand it off to a helper function.
 *
 * We don't really expect backward branches in a switch statement, but
 * they're perfectly legal, so we check for them here.
 *
 * For: packed-switch, sparse-switch
 */
    /* op vAA, +BBBB */
    FETCH   w0, 1                       // x0<- 000000000000bbbb (lo)
    FETCH_S x1, 2                       // x1<- ssssssssssssBBBB (hi)
    lsr     w3, wINST, #8               // w3<- AA
    orr     x0, x0, x1, lsl #16         // x0<- ssssssssBBBBbbbb
    GET_VREG w1, w3                     // w1<- vAA
    add     x0, xPC, x0, lsl #1         // x0<- PC + ssssssssBBBBbbbb*2
    bl      $func                       // w0<- code-unit branch offset
    sxtw    xINST, w0                   // xINST<- branch offset, sign-extended
    BRANCH

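/*
 * At the call, x0 holds the address of the switch payload (xPC plus the
 * doubled BBBBbbbb offset) and w1 holds the value of vAA being switched on;
 * the helper returns the branch offset in code units, which is sign-extended
 * into xINST for BRANCH.  For example, with bbbb = 0x0003 and BBBB = 0 the
 * payload starts 6 bytes past the current instruction.
 */
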
%def op_sparse_switch():
%  op_packed_switch(func="NterpDoSparseSwitch")

/*
 * Return from the current method.  The template below covers the 32-bit,
 * wide (64-bit), object and void variants.
 */
%def op_return(is_object="0", is_void="0", is_wide="0", is_no_barrier="0"):
    .if $is_void
      .if !$is_no_barrier
      // Thread fence for constructor
      dmb ishst
      .endif
    .else
      lsr     w2, wINST, #8               // w2<- AA
      .if $is_wide
        GET_VREG_WIDE x0, w2                // x0<- vAA
        // In case we're going back to compiled code, put the
        // result also in d0.
        fmov d0, x0
      .else
        GET_VREG w0, w2                     // w0<- vAA
        .if !$is_object
        // In case we're going back to compiled code, put the
        // result also in s0.
        fmov s0, w0
        .endif
      .endif
    .endif
    .cfi_remember_state
    ldr ip, [xREFS, #-8]
    mov sp, ip
    .cfi_def_cfa sp, CALLEE_SAVES_SIZE
    RESTORE_ALL_CALLEE_SAVES
    ret
    .cfi_restore_state

%def op_return_object():
%  op_return(is_object="1", is_void="0", is_wide="0", is_no_barrier="0")

%def op_return_void():
%  op_return(is_object="0", is_void="1", is_wide="0", is_no_barrier="0")

%def op_return_void_no_barrier():
%  op_return(is_object="0", is_void="1", is_wide="0", is_no_barrier="1")

%def op_return_wide():
%  op_return(is_object="0", is_void="0", is_wide="1", is_no_barrier="0")

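/*
 * Summary of the result registers set up by op_return above: objects are
 * returned in w0 only, other 32-bit values in w0 and (for compiled callers)
 * s0, and wide values in x0 and d0; return-void just issues the constructor
 * fence unless the no-barrier variant is used.
 */
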
%def op_throw():
  EXPORT_PC
  lsr      w2, wINST, #8               // w2<- AA
  GET_VREG w0, w2                      // w0<- vAA (exception object)
  mov x1, xSELF
  bl art_quick_deliver_exception
  brk 0                                // unreachable: the runtime call does not return
