%def header():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  Art assembly interpreter notes:

  First validate the assembly code by implementing an ExecuteXXXImpl() style
  body (doesn't handle invoke; allows higher-level code to create the frame &
  shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame
  (and the excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields,
  dex_pc_, method_ and number_of_vregs_, via negative offsets.  For now, we'll
  continue the shadow frame mechanism of double-storing object references - via
  rFP & number_of_vregs_.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/
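
/*
 * Illustration only (a minimal sketch, not used by mterp): a routine that
 * follows the EABI rules above.  "SomeHelper" is a hypothetical callee.
 *
 *     stmfd   sp!, {r4, lr}       @ save the callee-saves we clobber; sp stays 8-byte aligned
 *     mov     r4, r0              @ r4 survives the call below
 *     bl      SomeHelper          @ may clobber r0-r3 and r12; result (<= 4 bytes) in r0
 *     add     r0, r0, r4          @ combine result with the preserved value
 *     ldmfd   sp!, {r4, pc}       @ restore r4 and return via the saved lr
 */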

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rSELF     self (Thread) pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto
  r10 rPROFILE  branch profiling countdown
  r11 rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"

#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0

/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rPC      r4
#define CFI_DEX  4  // DWARF register number of the register holding dex-pc (rPC).
#define CFI_TMP  0  // DWARF register number of the first argument register (r0).
#define rFP      r5
#define rSELF    r6
#define rINST    r7
#define rIBASE   r8
#define rPROFILE r10
#define rREFS    r11

/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
 * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
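
/*
 * Example (this pattern appears throughout the file): load the current
 * ArtMethod* from the shadow frame using the negative-offset scheme above.
 *
 *     ldr     r0, [rFP, #OFF_FP_METHOD]
 */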

/*
 * "export" the PC to the dex_pc field in the shadow frame, for the benefit of
 * future exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of the dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects the dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
.endm

.macro EXPORT_DEX_PC tmp
    ldr  \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]
    sub  \tmp, rPC, \tmp
    asr  \tmp, #1
    str  \tmp, [rFP, #OFF_FP_DEX_PC]
.endm
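
/*
 * Typical use (see the common exception stubs in the footer): export the PC
 * before calling anything that can throw.  A sketch, with a hypothetical
 * helper name:
 *
 *     EXPORT_PC
 *     bl      SomeThrowingHelper
 */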

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
.macro FETCH_INST
    ldrh    rINST, [rPC]
.endm

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC.)
 */
.macro FETCH_ADVANCE_INST count
    ldrh    rINST, [rPC, #((\count)*2)]!
.endm
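
/*
 * The standard end-of-handler sequence built from this macro (a sketch; the
 * count of 2 matches a two-code-unit instruction, as in the footer code):
 *
 *     FETCH_ADVANCE_INST 2        @ advance rPC, load rINST
 *     GET_INST_OPCODE ip          @ extract opcode from rINST
 *     GOTO_OPCODE ip              @ jump to next instruction
 */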

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
.macro PREFETCH_ADVANCE_INST dreg, sreg, count
    ldrh    \dreg, [\sreg, #((\count)*2)]!
.endm

/*
 * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
 * rINST ahead of possible exception point.  Be sure to manually advance rPC
 * later.
 */
.macro PREFETCH_INST count
    ldrh    rINST, [rPC, #((\count)*2)]
.endm

/* Advance rPC by some number of code units. */
.macro ADVANCE count
  add  rPC, #((\count)*2)
.endm

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
.macro FETCH_ADVANCE_INST_RB reg
    ldrh    rINST, [rPC, \reg]!
.endm
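
/*
 * Example (from the branch handling code in the footer): rINST holds a
 * signed offset in code units, so it is doubled to form the byte offset
 * this macro requires.
 *
 *     add     r2, rINST, rINST    @ r2<- byte offset
 *     FETCH_ADVANCE_INST_RB r2    @ update rPC, load rINST
 */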

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
.macro FETCH reg, count
    ldrh    \reg, [rPC, #((\count)*2)]
.endm

.macro FETCH_S reg, count
    ldrsh   \reg, [rPC, #((\count)*2)]
.endm

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
.macro FETCH_B reg, count, byte
    ldrb     \reg, [rPC, #((\count)*2+(\byte))]
.endm
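
/*
 * Example (a sketch matching the layout of a 23x-format instruction such as
 * add-int vAA, vBB, vCC): pull the two byte-sized operands out of the second
 * code unit.
 *
 *     FETCH_B r2, 1, 0            @ r2<- BB (low byte of second code unit)
 *     FETCH_B r3, 1, 1            @ r3<- CC (high byte of second code unit)
 */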

/*
 * Put the instruction's opcode field into the specified register.
 */
.macro GET_INST_OPCODE reg
    and     \reg, rINST, #255
.endm

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
.macro GET_PREFETCHED_OPCODE oreg, ireg
    and     \oreg, \ireg, #255
.endm

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
.macro GOTO_OPCODE reg
    add     pc, rIBASE, \reg, lsl #${handler_size_bits}
.endm
.macro GOTO_OPCODE_BASE base,reg
    add     pc, \base, \reg, lsl #${handler_size_bits}
.endm

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
.macro GET_VREG reg, vreg
    ldr     \reg, [rFP, \vreg, lsl #2]
.endm
.macro SET_VREG reg, vreg
    str     \reg, [rFP, \vreg, lsl #2]
    mov     \reg, #0
    str     \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_WIDE regLo, regHi, vreg
    add     ip, rFP, \vreg, lsl #2
    strd    \regLo, \regHi, [ip]
    mov     \regLo, #0
    mov     \regHi, #0
    add     ip, rREFS, \vreg, lsl #2
    strd    \regLo, \regHi, [ip]
.endm
.macro SET_VREG_OBJECT reg, vreg, tmpreg
    str     \reg, [rFP, \vreg, lsl #2]
    str     \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_SHADOW reg, vreg
    str     \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_FLOAT reg, vreg, tmpreg
    add     \tmpreg, rFP, \vreg, lsl #2
    fsts    \reg, [\tmpreg]
    mov     \tmpreg, #0
    str     \tmpreg, [rREFS, \vreg, lsl #2]
.endm
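
/*
 * Example (a sketch of a 32-bit move between vregs, with the destination
 * index vA in r0 and the source index vB in r1; note that SET_VREG clobbers
 * the value register):
 *
 *     GET_VREG r2, r1             @ r2<- vB
 *     SET_VREG r2, r0             @ vA<- r2; also clears vA's shadow ref slot
 */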

/*
 * Clear the corresponding shadow regs for a vreg pair
 */
.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
    mov     \tmp1, #0
    add     \tmp2, \vreg, #1
    SET_VREG_SHADOW \tmp1, \vreg
    SET_VREG_SHADOW \tmp1, \tmp2
.endm

/*
 * Convert a virtual register index into an address.
 */
.macro VREG_INDEX_TO_ADDR reg, vreg
    add     \reg, rFP, \vreg, lsl #2   /* WARNING/FIXME: handle shadow frame vreg zero if store */
.endm

.macro GET_VREG_WIDE_BY_ADDR reg0, reg1, addr
    ldmia \addr, {\reg0, \reg1}
.endm
.macro SET_VREG_WIDE_BY_ADDR reg0, reg1, addr
    stmia \addr, {\reg0, \reg1}
.endm
.macro GET_VREG_FLOAT_BY_ADDR reg, addr
    flds \reg, [\addr]
.endm
.macro SET_VREG_FLOAT_BY_ADDR reg, addr
    fsts \reg, [\addr]
.endm
.macro GET_VREG_DOUBLE_BY_ADDR reg, addr
    fldd \reg, [\addr]
.endm
.macro SET_VREG_DOUBLE_BY_ADDR reg, addr
    fstd \reg, [\addr]
.endm

/*
 * Refresh handler table.
 */
.macro REFRESH_IBASE
  ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
.endm

/*
 * function support macros.
 */
.macro ENTRY name
    .arm
    .type \name, #function
    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
    .global \name
    /* Cache alignment for function entry */
    .balign 16
\name:
.endm

.macro END name
    .size \name, .-\name
.endm

// Macro to unpoison (negate) the reference for heap poisoning.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
    rsb \rRef, \rRef, #0
#endif  // USE_HEAP_POISONING
.endm
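
/*
 * Example (a sketch): after loading an object reference from a heap object,
 * unpoison it before dereferencing.  SOME_FIELD_OFFSET is hypothetical.
 *
 *     ldr     r0, [r0, #SOME_FIELD_OFFSET]    @ r0<- possibly poisoned reference
 *     UNPOISON_HEAP_REF r0                    @ no-op unless USE_HEAP_POISONING
 */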

%def entry():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

    .text
    .align  2

/*
 * On entry:
 *  r0  Thread* self
 *  r1  insns_
 *  r2  ShadowFrame
 *  r3  JValue* result_register
 *
 */
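
/*
 * For orientation, a sketch of the matching C declaration (an assumption;
 * the authoritative prototype lives in the runtime's C++ sources).  The r0
 * result set near MterpDone below (0 = retry with the reference interpreter,
 * 1 = done) suggests a bool-like return:
 *
 *   extern "C" bool ExecuteMterpImpl(Thread* self, const uint16_t* dex_instructions,
 *                                    ShadowFrame* shadow_frame, JValue* result_register);
 */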

ENTRY ExecuteMterpImpl
    .cfi_startproc
    stmfd   sp!, {r3-r10,fp,lr}         @ save 10 regs, (r3 just to align 64)
    .cfi_adjust_cfa_offset 40
    .cfi_rel_offset r3, 0
    .cfi_rel_offset r4, 4
    .cfi_rel_offset r5, 8
    .cfi_rel_offset r6, 12
    .cfi_rel_offset r7, 16
    .cfi_rel_offset r8, 20
    .cfi_rel_offset r9, 24
    .cfi_rel_offset r10, 28
    .cfi_rel_offset fp, 32
    .cfi_rel_offset lr, 36

    /* Remember the return register */
    str     r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]

    /* Remember the dex instruction pointer */
    str     r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]

    /* set up "named" registers */
    mov     rSELF, r0
    ldr     r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET     @ point to vregs.
    VREG_INDEX_TO_ADDR rREFS, r0                   @ point to reference array in shadow frame
    ldr     r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET]   @ Get starting dex_pc.
    add     rPC, r1, r0, lsl #1                    @ Create direct pointer to 1st dex opcode
    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
    EXPORT_PC

    /* Starting ibase */
    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]

    /* Set up for backwards branches & osr profiling */
    ldr     r0, [rFP, #OFF_FP_METHOD]
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    mov     r2, rSELF
    bl      MterpSetUpHotnessCountdown
    mov     rPROFILE, r0                @ Starting hotness countdown to rPROFILE

    /* start executing the instruction at rPC */
    FETCH_INST                          @ load rINST from rPC
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    GOTO_OPCODE ip                      @ jump to next instruction
    /* NOTE: no fallthrough */
    // cfi info continues, and covers the whole mterp implementation.
    END ExecuteMterpImpl

%def dchecks_before_helper():
    // Call C++ to do debug checks and return to the handler using tail call.
    .extern MterpCheckBefore
    mov    r0, rSELF
    add    r1, rFP, #OFF_FP_SHADOWFRAME
    mov    r2, rPC
    b      MterpCheckBefore     @ (self, shadow_frame, dex_pc_ptr)  @ Tail call.

%def opcode_pre():
%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
    #if !defined(NDEBUG)
    bl     mterp_dchecks_before_helper
    #endif

%def fallback():
/* Transfer stub to alternate interpreter */
    b    MterpFallback


%def helpers():
    ENTRY MterpHelpers

%def footer():
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

    .text
    .align  2

/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogDivideByZeroException
#endif
    b MterpCommonFallback

common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogArrayIndexException
#endif
    b MterpCommonFallback

common_errNegativeArraySize:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogNegativeArraySizeException
#endif
    b MterpCommonFallback

common_errNoSuchMethod:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogNoSuchMethodException
#endif
    b MterpCommonFallback

common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogNullObjectException
#endif
    b MterpCommonFallback

common_exceptionThrown:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogExceptionThrownException
#endif
    b MterpCommonFallback

MterpSuspendFallback:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    ldr  r2, [rSELF, #THREAD_FLAGS_OFFSET]
    bl MterpLogSuspendFallback
#endif
    b MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    ldr     r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
    cmp     r0, #0                                  @ Exception pending?
    beq     MterpFallback                           @ If not, fall back to reference interpreter.
    /* intentional fallthrough - handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 */
MterpException:
    mov     r0, rSELF
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    bl      MterpHandleException                    @ (self, shadow_frame)
    cmp     r0, #0
    beq     MterpExceptionReturn                    @ no local catch, back to caller.
    ldr     r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
    ldr     r1, [rFP, #OFF_FP_DEX_PC]
    ldr     rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
    add     rPC, r0, r1, lsl #1                     @ generate new dex_pc_ptr
    /* Do we need to switch interpreters? */
    ldr     r0, [rSELF, #THREAD_USE_MTERP_OFFSET]
    cmp     r0, #0
    beq     MterpFallback
    /* resume execution at catch block */
    EXPORT_PC
    FETCH_INST
    GET_INST_OPCODE ip
    GOTO_OPCODE ip
    /* NOTE: no fallthrough */

/*
 * Common handling for branches with support for Jit profiling.
 * On entry:
 *    rINST          <= signed offset
 *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
 *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling is active, do the hotness countdown and report if we hit zero.
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active jit profiling,
 * but no full OSR check and no pending suspend request.
 * The next most common case is a not-taken branch with no full OSR check.
 *
 */
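/*
 * Sketch of how a branch handler reaches this code (the real handlers are
 * generated from the opcode templates; this mirrors a goto-style handler):
 *
 *     FETCH_S rINST, 1                    @ rINST<- signed branch offset, in code units
 *     b       MterpCommonTakenBranchNoFlags
 */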
MterpCommonTakenBranchNoFlags:
    cmp     rINST, #0
MterpCommonTakenBranch:
    bgt     .L_forward_branch           @ don't add forward branches to hotness
/*
 * We need to subtract 1 from positive values and we should not see 0 here,
 * so we may use the result of the comparison with -1.
 */
#if JIT_CHECK_OSR != -1
#  error "JIT_CHECK_OSR must be -1."
#endif
    cmp     rPROFILE, #JIT_CHECK_OSR
    beq     .L_osr_check
    subsgt  rPROFILE, #1
    beq     .L_add_batch                @ counted down to zero - report
.L_resume_backward_branch:
    ldr     lr, [rSELF, #THREAD_FLAGS_OFFSET]
    REFRESH_IBASE
    add     r2, rINST, rINST            @ r2<- byte offset
    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
    ands    lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    bne     .L_suspend_request_pending
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    GOTO_OPCODE ip                      @ jump to next instruction

.L_suspend_request_pending:
    EXPORT_PC
    mov     r0, rSELF
    bl      MterpSuspendCheck           @ (self)
    cmp     r0, #0
    bne     MterpFallback
    REFRESH_IBASE                       @ might have changed during suspend
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    GOTO_OPCODE ip                      @ jump to next instruction

.L_no_count_backwards:
    cmp     rPROFILE, #JIT_CHECK_OSR    @ possible OSR re-entry?
    bne     .L_resume_backward_branch
.L_osr_check:
    mov     r0, rSELF
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    mov     r2, rINST
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
    cmp     r0, #0
    bne     MterpOnStackReplacement
    b       .L_resume_backward_branch

.L_forward_branch:
    cmp     rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
    beq     .L_check_osr_forward
.L_resume_forward_branch:
    add     r2, rINST, rINST            @ r2<- byte offset
    FETCH_ADVANCE_INST_RB r2            @ update rPC, load rINST
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    GOTO_OPCODE ip                      @ jump to next instruction

.L_check_osr_forward:
    mov     r0, rSELF
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    mov     r2, rINST
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
    cmp     r0, #0
    bne     MterpOnStackReplacement
    b       .L_resume_forward_branch

.L_add_batch:
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
    ldr     r0, [rFP, #OFF_FP_METHOD]
    mov     r2, rSELF
    bl      MterpAddHotnessBatch        @ (method, shadow_frame, self)
    mov     rPROFILE, r0                @ restore new hotness countdown to rPROFILE
    b       .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when an OSR check request is
 * active on the not-taken path.  All Dalvik not-taken conditional branch
 * offsets are 2.
 */
.L_check_not_taken_osr:
    mov     r0, rSELF
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    mov     r2, #2
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  @ (self, shadow_frame, offset)
    cmp     r0, #0
    bne     MterpOnStackReplacement
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    GOTO_OPCODE ip                      @ jump to next instruction

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    mov r0, rSELF
    add r1, rFP, #OFF_FP_SHADOWFRAME
    mov r2, rINST
    bl MterpLogOSR
#endif
    mov r0, #1                          @ Signal normal return
    b MterpDone

/*
 * Bail out to reference interpreter.
 */
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    mov  r0, rSELF
    add  r1, rFP, #OFF_FP_SHADOWFRAME
    bl MterpLogFallback
#endif
MterpCommonFallback:
    mov     r0, #0                                  @ signal retry with reference interpreter.
    b       MterpDone

/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    mov     r0, #1                                  @ signal return to caller.
    b MterpDone
MterpReturn:
    ldr     r2, [rFP, #OFF_FP_RESULT_REGISTER]
    str     r0, [r2]
    str     r1, [r2, #4]
    mov     r0, #1                                  @ signal return to caller.
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    cmp     rPROFILE, #0
    bgt     MterpProfileActive                      @ if > 0, we may have some counts to report.
    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return

MterpProfileActive:
    mov     rINST, r0                               @ stash return value
    /* Report cached hotness counts */
    ldr     r0, [rFP, #OFF_FP_METHOD]
    add     r1, rFP, #OFF_FP_SHADOWFRAME
    mov     r2, rSELF
    strh    rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
    bl      MterpAddHotnessBatch                    @ (method, shadow_frame, self)
    mov     r0, rINST                               @ restore return value
    ldmfd   sp!, {r3-r10,fp,pc}                     @ restore 10 regs and return

    .cfi_endproc
    END MterpHelpers

%def instruction_end():

    .type artMterpAsmInstructionEnd, #object
    .hidden artMterpAsmInstructionEnd
    .global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:

%def instruction_start():

    .type artMterpAsmInstructionStart, #object
    .hidden artMterpAsmInstructionStart
    .global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
    .text

%def opcode_start():
    ENTRY mterp_${opcode}
%def opcode_end():
    END mterp_${opcode}
%def helper_start(name):
    ENTRY ${name}
%def helper_end(name):
    END ${name}
