%def bindiv(result="", special="", rem=""):
/*
 * 32-bit binary div/rem operation.  Handles special case of op0=minint and
 * op1=-1.
 */
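/*
 * For reference, a C sketch of the semantics this template implements
 * (illustrative only; not part of the handler):
 *
 *     int32_t div32(int32_t a, int32_t b) {
 *         if (b == 0) goto common_errDivideByZero;
 *         if (a == INT32_MIN && b == -1) return special;  // idiv would #DE
 *         return a / b;         // or a % b for rem; truncates toward zero
 *     }
 *
 * where "special" is minint for div and 0 for rem.
 */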
    /* div/rem vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    GET_VREG %eax, %eax                     # eax <- vBB
    GET_VREG %ecx, %ecx                     # ecx <- vCC
    mov     rIBASE, LOCAL0(%esp)
    testl   %ecx, %ecx
    je      common_errDivideByZero
    movl    %eax, %edx
    orl     %ecx, %edx
    testl   $$0xFFFFFF00, %edx              # If both arguments fit in
                                            #   8 bits and are positive
    jz      .L${opcode}_8                   # Do 8-bit divide
    testl   $$0xFFFF0000, %edx              # If both arguments fit in
                                            #   16 bits and are positive
    jz      .L${opcode}_16                  # Do 16-bit divide
    cmpl    $$-1, %ecx
    jne     .L${opcode}_32
    cmpl    $$0x80000000, %eax
    jne     .L${opcode}_32
    movl    $special, $result
    jmp     .L${opcode}_finish
%  add_helper(lambda: bindiv_helper(result, rem))

%def bindiv_helper(result, rem):
.L${opcode}_32:
    cltd
    idivl   %ecx
    jmp     .L${opcode}_finish
.L${opcode}_8:
    div     %cl                             # 8-bit divide otherwise.
                                            # Remainder in %ah, quotient in %al
    .if $rem
    movl    %eax, %edx
    shr     $$8, %edx
    .else
    andl    $$0x000000FF, %eax
    .endif
    jmp     .L${opcode}_finish
.L${opcode}_16:
    xorl    %edx, %edx                      # Clear %edx before divide
    div     %cx
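                                            # Quotient in %ax, remainder in %dx.
                                            # Inputs were positive 16-bit values
                                            # and %edx was cleared, so %eax and
                                            # rIBASE already hold zero-extended
                                            # results.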
.L${opcode}_finish:
    SET_VREG $result, rINST
    mov     LOCAL0(%esp), rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def bindiv2addr(result="", special=""):
/*
 * 32-bit binary div/rem operation.  Handles special case of op0=minint and
 * op1=-1.
 */
    /* div/rem/2addr vA, vB */
    movzx   rINSTbl, %ecx                   # ecx <- BA
    mov     rIBASE, LOCAL0(%esp)
    sarl    $$4, %ecx                       # ecx <- B
    GET_VREG %ecx, %ecx                     # ecx <- vB
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, rINST                    # eax <- vA
    testl   %ecx, %ecx
    je      common_errDivideByZero
    cmpl    $$-1, %ecx
    jne     .L${opcode}_continue_div2addr
    cmpl    $$0x80000000, %eax
    jne     .L${opcode}_continue_div2addr
    movl    $special, $result
    SET_VREG $result, rINST
    mov     LOCAL0(%esp), rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%  add_helper(lambda: bindiv2addr_helper(result))

%def bindiv2addr_helper(result):
.L${opcode}_continue_div2addr:
    cltd
    idivl   %ecx
    SET_VREG $result, rINST
    mov     LOCAL0(%esp), rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def bindivLit16(result="", special=""):
/*
 * 32-bit binary div/rem operation.  Handles special case of op0=minint and
 * op1=-1.
 */
    /* div/rem/lit16 vA, vB, #+CCCC */
    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
    movzbl  rINSTbl, %eax                   # eax <- 000000BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %eax                     # eax <- vB
    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
    andb    $$0xf, rINSTbl                  # rINST <- A
    testl   %ecx, %ecx
    je      common_errDivideByZero
    cmpl    $$-1, %ecx
    jne     .L${opcode}_continue_div
    cmpl    $$0x80000000, %eax
    jne     .L${opcode}_continue_div
    movl    $special, %eax
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

.L${opcode}_continue_div:
    mov     rIBASE, LOCAL0(%esp)
    cltd
    idivl   %ecx
    SET_VREG $result, rINST
    mov     LOCAL0(%esp), rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def bindivLit8(result="", special=""):
/*
 * 32-bit div/rem "lit8" binary operation.  Handles special case of
 * op0=minint & op1=-1
 */
    /* div/rem/lit8 vAA, vBB, #+CC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
    GET_VREG  %eax, %eax                    # eax <- vBB
    testl   %ecx, %ecx
    je      common_errDivideByZero
    cmpl    $$0x80000000, %eax
    jne     .L${opcode}_continue_div
    cmpl    $$-1, %ecx
    jne     .L${opcode}_continue_div
    movl    $special, %eax
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

.L${opcode}_continue_div:
    mov     rIBASE, LOCAL0(%esp)
    cltd
    idivl   %ecx
    SET_VREG $result, rINST
    mov     LOCAL0(%esp), rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binop(result="%eax", instr=""):
/*
 * Generic 32-bit binary operation.  Provide an "instr" line that
 * specifies an instruction that performs "result = eax op VREG_ADDRESS(%ecx)".
 * This could be an x86 instruction or a function call.  (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int, sub-int, and-int, or-int, xor-int
 */
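/*
 * For example (a sketch of the expansion): op_add_int passes instr="addl",
 * so the instr line below becomes "addl    VREG_ADDRESS(%ecx), %eax",
 * leaving the sum in %eax for SET_VREG.
 */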
    /* binop vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    GET_VREG %eax, %eax                     # eax <- vBB
    $instr VREG_ADDRESS(%ecx), %eax
    SET_VREG $result, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binop1(result="%eax", tmp="%ecx", instr=""):
/*
 * Generic 32-bit binary operation in which both operands are loaded
 * into registers (op0 in eax, op1 in ecx).
 *
 * For: shl-int, shr-int, ushr-int
 */
    /* binop vAA, vBB, vCC */
    movzbl  2(rPC),%eax                     # eax <- BB
    movzbl  3(rPC),%ecx                     # ecx <- CC
    GET_VREG %eax, %eax                     # eax <- vBB
    GET_VREG %ecx, %ecx                     # ecx <- vCC
    $instr                                  # ex: addl    %ecx,%eax
    SET_VREG $result, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binop2addr(result="%eax", instr=""):
/*
 * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
 * that specifies an instruction that performs
 * "VREG_ADDRESS(%ecx) = VREG_ADDRESS(%ecx) op eax".
 * This could be an instruction or a function call.
 *
 * For: add-int/2addr, sub-int/2addr, and-int/2addr, or-int/2addr,
 *      xor-int/2addr
 */
    /* binop/2addr vA, vB */
    movzx   rINSTbl, %ecx                   # ecx <- A+
    sarl    $$4, rINST                      # rINST <- B
    GET_VREG %eax, rINST                    # eax <- vB
    andb    $$0xf, %cl                      # ecx <- A
    $instr %eax, VREG_ADDRESS(%ecx)
    CLEAR_REF %ecx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def binopLit16(result="%eax", instr=""):
/*
 * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
 * that specifies an instruction that performs "result = eax op ecx".
 * This could be an x86 instruction or a function call.  (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int/lit16, rsub-int,
 *      and-int/lit16, or-int/lit16, xor-int/lit16
 */
    /* binop/lit16 vA, vB, #+CCCC */
    movzbl  rINSTbl, %eax                   # eax <- 000000BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %eax                     # eax <- vB
    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
    andb    $$0xf, rINSTbl                  # rINST <- A
    $instr                                  # for example: addl %ecx, %eax
    SET_VREG $result, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopLit8(result="%eax", instr=""):
/*
 * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
 * that specifies an instruction that performs "result = eax op ecx".
 * This could be an x86 instruction or a function call.  (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int/lit8, rsub-int/lit8
 *      and-int/lit8, or-int/lit8, xor-int/lit8,
 *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
 */
    /* binop/lit8 vAA, vBB, #+CC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
    GET_VREG %eax, %eax                     # eax <- vBB
    $instr                                  # ex: addl %ecx,%eax
    SET_VREG $result, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopWide(instr1="", instr2=""):
/*
 * Generic 64-bit binary operation.
 */
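/*
 * instr1 is applied to the low words and instr2 to the high words, so
 * arithmetic ops must pass a carry-propagating pair.  E.g. add-long uses
 * addl/adcl -- the usual 64-bit add from 32-bit halves, roughly:
 *
 *     lo = b_lo + c_lo;            // addl sets the carry flag
 *     hi = b_hi + c_hi + carry;    // adcl consumes it
 */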
    /* binop vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
    $instr1 VREG_ADDRESS(%ecx), rIBASE
    $instr2 VREG_HIGH_ADDRESS(%ecx), %eax
    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopWide2addr(instr1="", instr2=""):
/*
 * Generic 64-bit binary operation.
 */
    /* binop/2addr vA, vB */
    movzbl  rINSTbl, %ecx                   # ecx<- BA
    sarl    $$4, %ecx                       # ecx<- B
    GET_VREG %eax, %ecx                     # eax<- v[B+0]
    GET_VREG_HIGH %ecx, %ecx                # ecx<- v[B+1]
    andb    $$0xF, rINSTbl                  # rINST<- A
    $instr1 %eax, VREG_ADDRESS(rINST)
    $instr2 %ecx, VREG_HIGH_ADDRESS(rINST)
    CLEAR_WIDE_REF rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def cvtfp_int(srcdouble="1", tgtlong="1"):
/* On fp-to-int conversions, Java requires that
 * a result greater than maxint be clamped to maxint and a result less
 * than minint be clamped to minint.  A NaN input yields zero.  Further,
 * the rounding mode is truncation (round toward zero).  This model
 * differs from what the x86 FPU delivers by default, so we have
 * to play some games.
 */
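/*
 * A C sketch of the required behavior (illustrative only):
 *
 *     int32_t fp_to_int(double v) {
 *         if (v != v) return 0;                        // NaN
 *         if (v >= (double)INT32_MAX) return INT32_MAX;
 *         if (v <= (double)INT32_MIN) return INT32_MIN;
 *         return (int32_t)v;                           // truncate toward zero
 *     }
 */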
    /* float/double to int/long vA, vB */
    movzbl  rINSTbl, %ecx                   # ecx <- A+
    sarl    $$4, rINST                      # rINST <- B
    .if $srcdouble
    fldl    VREG_ADDRESS(rINST)             # %st0 <- vB
    .else
    flds    VREG_ADDRESS(rINST)             # %st0 <- vB
    .endif
    ftst
    fnstcw  LOCAL0(%esp)                    # remember original rounding mode
    movzwl  LOCAL0(%esp), %eax
    movb    $$0xc, %ah
    movw    %ax, LOCAL0+2(%esp)
    fldcw   LOCAL0+2(%esp)                  # set "to zero" rounding mode
    andb    $$0xf, %cl                      # ecx <- A
    .if $tgtlong
    fistpll VREG_ADDRESS(%ecx)              # convert and store
    .else
    fistpl  VREG_ADDRESS(%ecx)              # convert and store
    .endif
    fldcw   LOCAL0(%esp)                    # restore previous rounding mode
    .if $tgtlong
    movl    $$0x80000000, %eax
    xorl    VREG_HIGH_ADDRESS(%ecx), %eax
    orl     VREG_ADDRESS(%ecx), %eax
    .else
    cmpl    $$0x80000000, VREG_ADDRESS(%ecx)
    .endif
    je      .L${opcode}_special_case # fix up result

.L${opcode}_finish:
    xor     %eax, %eax
    mov     %eax, VREG_REF_ADDRESS(%ecx)
    .if $tgtlong
    mov     %eax, VREG_REF_HIGH_ADDRESS(%ecx)
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%  add_helper(lambda: cvtfp_int_helper(tgtlong))

%def cvtfp_int_helper(tgtlong):
.L${opcode}_special_case:
    fnstsw  %ax
    sahf
    jp      .L${opcode}_isNaN
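    /*
     * ftst's condition codes are assumed to still sit in the FPU status
     * word here: C0 (CF after sahf) means the operand was negative, C2
     * (PF) means NaN.  The out-of-range store left 0x80000000 (or
     * 0x8000000000000000), so adding -1 plus the carry yields
     * maxint/maxlong on positive overflow and leaves minint/minlong on
     * negative overflow.
     */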
    adcl    $$-1, VREG_ADDRESS(%ecx)
    .if $tgtlong
    adcl    $$-1, VREG_HIGH_ADDRESS(%ecx)
    .endif
    jmp     .L${opcode}_finish
.L${opcode}_isNaN:
    movl    $$0, VREG_ADDRESS(%ecx)
    .if $tgtlong
    movl    $$0, VREG_HIGH_ADDRESS(%ecx)
    .endif
    jmp     .L${opcode}_finish

%def shop2addr(result="%eax", instr=""):
/*
 * Generic 32-bit "shift/2addr" operation.
 */
    /* shift/2addr vA, vB */
    movzx   rINSTbl, %ecx                   # ecx <- BA
    sarl    $$4, %ecx                       # ecx <- B
    GET_VREG %ecx, %ecx                     # ecx <- vB
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, rINST                    # eax <- vA
    $instr                                  # ex: sarl %cl, %eax
    SET_VREG $result, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def unop(instr=""):
/*
 * Generic 32-bit unary operation.  Provide an "instr" line that
 * specifies an instruction that performs "result = op eax".
 */
    /* unop vA, vB */
    movzbl  rINSTbl,%ecx                    # ecx <- A+
    sarl    $$4,rINST                       # rINST <- B
    GET_VREG %eax, rINST                    # eax <- vB
    andb    $$0xf,%cl                       # ecx <- A
    $instr
    SET_VREG %eax, %ecx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_add_int():
%  binop(instr="addl")

%def op_add_int_2addr():
%  binop2addr(instr="addl")

%def op_add_int_lit16():
%  binopLit16(instr="addl    %ecx, %eax")

%def op_add_int_lit8():
%  binopLit8(instr="addl    %ecx, %eax")

%def op_add_long():
%  binopWide(instr1="addl", instr2="adcl")

%def op_add_long_2addr():
%  binopWide2addr(instr1="addl", instr2="adcl")

%def op_and_int():
%  binop(instr="andl")

%def op_and_int_2addr():
%  binop2addr(instr="andl")

%def op_and_int_lit16():
%  binopLit16(instr="andl    %ecx, %eax")

%def op_and_int_lit8():
%  binopLit8(instr="andl    %ecx, %eax")

%def op_and_long():
%  binopWide(instr1="andl", instr2="andl")

%def op_and_long_2addr():
%  binopWide2addr(instr1="andl", instr2="andl")

%def op_cmp_long():
/*
 * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
 * register based on the results of the comparison.
 */
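/*
 * Equivalent C sketch:
 *
 *     result = (vBB > vCC) ? 1 : (vBB < vCC) ? -1 : 0;  // signed 64-bit
 *
 * The high words are compared first, signed; only if they are equal do
 * the low words decide, and those compare unsigned (ja/jb below).
 */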
    /* cmp-long vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
    cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
    jl      .L${opcode}_smaller
    jg      .L${opcode}_bigger
    movzbl  2(rPC), %eax                    # eax <- BB, restore BB
    GET_VREG %eax, %eax                     # eax <- v[BB]
    sub     VREG_ADDRESS(%ecx), %eax
    ja      .L${opcode}_bigger
    jb      .L${opcode}_smaller
.L${opcode}_finish:
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

.L${opcode}_bigger:
    movl    $$1, %eax
    jmp     .L${opcode}_finish

.L${opcode}_smaller:
    movl    $$-1, %eax
    jmp     .L${opcode}_finish

%def op_div_int():
%  bindiv(result="%eax", special="$0x80000000", rem="0")

%def op_div_int_2addr():
%  bindiv2addr(result="%eax", special="$0x80000000")

%def op_div_int_lit16():
%  bindivLit16(result="%eax", special="$0x80000000")

%def op_div_int_lit8():
%  bindivLit8(result="%eax", special="$0x80000000")

%def op_div_long(routine="art_quick_ldiv"):
/* art_quick_* methods use the quick ABI,
 * so eax, ecx, edx and ebx carry the arguments
 */
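/* Judging from the register shuffling below, the callee appears to take
 * the dividend in %eax (low)/%ecx (high) and the divisor in %edx (low)/
 * %ebx (high), returning the 64-bit result in %edx:%eax.
 */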
    /* div vAA, vBB, vCC */
    .extern $routine
    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
    movzbl  3(rPC), %eax                    # eax <- CC
    GET_VREG %ecx, %eax                     # ecx <- v[CC+0], divisor lsw
    GET_VREG_HIGH %ebx, %eax                # ebx <- v[CC+1], divisor msw
    movl    %ecx, %edx                      # edx <- divisor lsw (arg reg)
    orl     %ebx, %ecx                      # test divisor for zero
    jz      common_errDivideByZero
    movzbl  2(rPC), %eax                    # eax <- BB
    GET_VREG_HIGH %ecx, %eax                # ecx <- v[BB+1], dividend msw
    GET_VREG %eax, %eax                     # eax <- v[BB+0], dividend lsw
    call    SYMBOL($routine)
    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
    SET_VREG_HIGH rIBASE, rINST
    SET_VREG %eax, rINST
    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_div_long_2addr(routine="art_quick_ldiv"):
/* art_quick_* methods use the quick ABI,
 * so eax, ecx, edx and ebx carry the arguments
 */
    /* div/2addr vA, vB */
    .extern   $routine
    mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
    movzbl  rINSTbl, %eax
    shrl    $$4, %eax                       # eax <- B
    andb    $$0xf, rINSTbl                  # rINST <- A
    mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
    movl    %ebx, %ecx                      # ecx <- A (rINST)
    GET_VREG %edx, %eax                     # edx <- v[B+0], divisor lsw
    GET_VREG_HIGH %ebx, %eax                # ebx <- v[B+1], divisor msw
    movl    %edx, %eax
    orl     %ebx, %eax                      # test divisor for zero
    jz      common_errDivideByZero
    GET_VREG %eax, %ecx                     # eax <- v[A+0], dividend lsw
    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[A+1], dividend msw
    call    SYMBOL($routine)
    mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
    SET_VREG_HIGH rIBASE, rINST
    SET_VREG %eax, rINST
    mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_int_to_byte():
%  unop(instr="movsbl  %al, %eax")

%def op_int_to_char():
%  unop(instr="movzwl  %ax,%eax")

%def op_int_to_long():
    /* int to long vA, vB */
    movzbl  rINSTbl, %eax                   # eax <- BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %eax                     # eax <- vB
    andb    $$0xf, rINSTbl                  # rINST <- A
    movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
    cltd                                    # rIBASE:eax <- ssssssssBBBBBBBB
    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
    SET_VREG %eax, rINST                    # v[A+0] <- %eax
    movl    %ecx, rIBASE
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1


%def op_int_to_short():
%  unop(instr="movswl %ax, %eax")

%def op_long_to_int():
/* we ignore the high word, making this equivalent to a 32-bit reg move */
%  op_move()

%def op_mul_int():
    /*
     * 32-bit binary multiplication.
     */
    /* mul vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    GET_VREG %eax, %eax                     # eax <- vBB
    mov     rIBASE, LOCAL0(%esp)
    imull   VREG_ADDRESS(%ecx), %eax        # trashes rIBASE/edx
    mov     LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_mul_int_2addr():
    /* mul vA, vB */
    movzx   rINSTbl, %ecx                   # ecx <- A+
    sarl    $$4, rINST                      # rINST <- B
    GET_VREG %eax, rINST                    # eax <- vB
    andb    $$0xf, %cl                      # ecx <- A
    movl    rIBASE, rINST
    imull   VREG_ADDRESS(%ecx), %eax        # trashes rIBASE/edx
    movl    rINST, rIBASE
    SET_VREG %eax, %ecx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_mul_int_lit16():
    /* mul/lit16 vA, vB, #+CCCC */
    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
    movzbl  rINSTbl, %eax                   # eax <- 000000BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %eax                     # eax <- vB
    movl    rIBASE, %ecx
    movswl  2(rPC), rIBASE                  # rIBASE <- ssssCCCC
    andb    $$0xf, rINSTbl                  # rINST <- A
    imull   rIBASE, %eax                    # trashes rIBASE/edx
    movl    %ecx, rIBASE
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_mul_int_lit8():
    /* mul/lit8 vAA, vBB, #+CC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movl    rIBASE, %ecx
    GET_VREG  %eax, %eax                    # eax <- vBB
    movsbl  3(rPC), rIBASE                  # rIBASE <- ssssssCC
    imull   rIBASE, %eax                    # trashes rIBASE/edx
    movl    %ecx, rIBASE
    SET_VREG %eax, rINST
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_mul_long():
/*
 * Signed 64-bit integer multiply.
 *
 * We could definitely use more free registers for
 * this code.  We spill rIBASE (edx), giving us
 * eax, ecx and edx as computational temps.  On top
 * of that, we'll spill rPC (esi) for use as the vB
 * pointer and rFP (edi) for use as the vC pointer.
 * Yuck.
 */
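/*
 * The 32x32->64 decomposition being computed, as a C sketch
 * (illustrative only):
 *
 *     uint64_t mul64(uint64_t b, uint64_t c) {
 *         uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
 *         uint32_t c_lo = (uint32_t)c, c_hi = (uint32_t)(c >> 32);
 *         uint64_t lo = (uint64_t)b_lo * c_lo;           // mull
 *         uint32_t hi = b_hi * c_lo + c_hi * b_lo        // two imulls
 *                     + (uint32_t)(lo >> 32);            // leal
 *         return ((uint64_t)hi << 32) | (uint32_t)lo;
 *     }
 */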
    /* mul-long vAA, vBB, vCC */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
    mov     rFP, LOCAL1(%esp)               # save FP
    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
    leal    (rFP,%eax,4), %esi              # esi <- &v[B]
    leal    VREG_ADDRESS(%ecx), rFP         # rFP <- &v[C]
    movl    4(%esi), %ecx                   # ecx <- Bmsw
    imull   (rFP), %ecx                     # ecx <- (Bmsw*Clsw)
    movl    4(rFP), %eax                    # eax <- Cmsw
    imull   (%esi), %eax                    # eax <- (Cmsw*Blsw)
    addl    %eax, %ecx                      # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
    movl    (rFP), %eax                     # eax <- Clsw
    mull    (%esi)                          # rIBASE:eax <- (Clsw*Blsw)
    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
    mov     LOCAL1(%esp), rFP               # restore FP
    leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
    mov     LOCAL2(%esp), rIBASE            # restore IBASE
    SET_VREG %eax, rINST                    # v[AA+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_mul_long_2addr():
/*
 * Signed 64-bit integer multiply, 2-addr version.
 *
 * We could definitely use more free registers for
 * this code.  We must spill rIBASE (edx) because it
 * is used by mul.  On top of that, we'll spill
 * rPC (esi) for use as the vA pointer and rFP (edi)
 * for use as the vB pointer.  Yuck.
 */
    /* mul-long/2addr vA, vB */
    movzbl  rINSTbl, %eax                   # eax <- BA
    andb    $$0xf, %al                      # eax <- A
    CLEAR_WIDE_REF %eax                     # clear refs in advance
    sarl    $$4, rINST                      # rINST <- B
    mov     rPC, LOCAL0(%esp)               # save Interpreter PC
    mov     rFP, LOCAL1(%esp)               # save FP
    mov     rIBASE, LOCAL2(%esp)            # save rIBASE
    leal    (rFP,%eax,4), %esi              # esi <- &v[A]
    leal    (rFP,rINST,4), rFP              # rFP <- &v[B]
    movl    4(%esi), %ecx                   # ecx <- Amsw
    imull   (rFP), %ecx                     # ecx <- (Amsw*Blsw)
    movl    4(rFP), %eax                    # eax <- Bmsw
    imull   (%esi), %eax                    # eax <- (Bmsw*Alsw)
    addl    %eax, %ecx                      # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
    movl    (rFP), %eax                     # eax <- Blsw
    mull    (%esi)                          # rIBASE:eax <- (Blsw*Alsw)
    leal    (%ecx,rIBASE), rIBASE           # full result now in %edx:%eax
    movl    rIBASE, 4(%esi)                 # v[A+1] <- rIBASE
    movl    %eax, (%esi)                    # v[A] <- %eax
    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
    mov     LOCAL2(%esp), rIBASE            # restore IBASE
    mov     LOCAL1(%esp), rFP               # restore FP
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_neg_int():
%  unop(instr="negl    %eax")

%def op_neg_long():
    /* unop vA, vB */
    movzbl  rINSTbl, %ecx                   # ecx <- BA
    sarl    $$4, %ecx                       # ecx <- B
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, %ecx                     # eax <- v[B+0]
    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
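    /* 64-bit negate from 32-bit pieces: negl sets CF iff the low word was
     * non-zero; adcl $$0 folds that borrow into the high word, which is
     * then negated itself. */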
    negl    %eax
    adcl    $$0, %ecx
    negl    %ecx
    SET_VREG %eax, rINST                    # v[A+0] <- eax
    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1


%def op_not_int():
%  unop(instr="notl %eax")

%def op_not_long():
    /* unop vA, vB */
    movzbl  rINSTbl, %ecx                   # ecx <- BA
    sarl    $$4, %ecx                       # ecx <- B
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, %ecx                     # eax <- v[B+0]
    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
    notl    %eax
    notl    %ecx
    SET_VREG %eax, rINST                    # v[A+0] <- eax
    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_or_int():
%  binop(instr="orl")

%def op_or_int_2addr():
%  binop2addr(instr="orl")

%def op_or_int_lit16():
%  binopLit16(instr="orl     %ecx, %eax")

%def op_or_int_lit8():
%  binopLit8(instr="orl     %ecx, %eax")

%def op_or_long():
%  binopWide(instr1="orl", instr2="orl")

%def op_or_long_2addr():
%  binopWide2addr(instr1="orl", instr2="orl")

%def op_rem_int():
%  bindiv(result="rIBASE", special="$0", rem="1")

%def op_rem_int_2addr():
%  bindiv2addr(result="rIBASE", special="$0")

%def op_rem_int_lit16():
%  bindivLit16(result="rIBASE", special="$0")

%def op_rem_int_lit8():
%  bindivLit8(result="rIBASE", special="$0")

%def op_rem_long():
%  op_div_long(routine="art_quick_lmod")

%def op_rem_long_2addr():
%  op_div_long_2addr(routine="art_quick_lmod")

%def op_rsub_int():
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
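/* i.e. vA <- #+CCCC - vB: the subl operands are reversed and the
 * difference lands in %ecx, hence result="%ecx" below. */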
%  binopLit16(instr="subl    %eax, %ecx", result="%ecx")

%def op_rsub_int_lit8():
%  binopLit8(instr="subl    %eax, %ecx", result="%ecx")

%def op_shl_int():
%  binop1(instr="sall    %cl, %eax")

%def op_shl_int_2addr():
%  shop2addr(instr="sall    %cl, %eax")

%def op_shl_int_lit8():
%  binopLit8(instr="sall    %cl, %eax")

%def op_shl_long():
/*
 * Long integer shift.  This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
 * 6 bits of the shift distance.  x86 shifts automatically mask off
 * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
 * case specially.
 */
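/*
 * A C sketch of the sequence below (count already in %cl; illustrative):
 *
 *     n = shift & 31;
 *     hi = (hi << n) | (n ? lo >> (32 - n) : 0);    // shldl
 *     lo = lo << n;                                 // sall
 *     if (shift & 32) { hi = lo; lo = 0; }          // testb/je fixup
 */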
    /* shl-long vAA, vBB, vCC */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets AA */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
    GET_VREG %ecx, %ecx                     # ecx <- vCC
    GET_VREG %eax, %eax                     # eax <- v[BB+0]
    shldl   %eax, rIBASE
    sall    %cl, %eax
    testb   $$32, %cl
    je      2f
    movl    %eax, rIBASE
    xorl    %eax, %eax
2:
    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_shl_long_2addr():
/*
 * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
 * 32-bit shift distance.
 */
    /* shl-long/2addr vA, vB */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets A */
    movzbl  rINSTbl, %ecx                   # ecx <- BA
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, rINST                    # eax <- v[A+0]
    sarl    $$4, %ecx                       # ecx <- B
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[A+1]
    GET_VREG %ecx, %ecx                     # ecx <- vB
    shldl   %eax, rIBASE
    sall    %cl, %eax
    testb   $$32, %cl
    je      2f
    movl    %eax, rIBASE
    xorl    %eax, %eax
2:
    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[A+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_shr_int():
%  binop1(instr="sarl    %cl, %eax")

%def op_shr_int_2addr():
%  shop2addr(instr="sarl    %cl, %eax")

%def op_shr_int_lit8():
%  binopLit8(instr="sarl    %cl, %eax")

%def op_shr_long():
/*
 * Long integer shift.  This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
 * 6 bits of the shift distance.  x86 shifts automatically mask off
 * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
 * case specially.
 */
    /* shr-long vAA, vBB, vCC */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets AA */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
    GET_VREG %ecx, %ecx                     # ecx <- vCC
    GET_VREG %eax, %eax                     # eax <- v[BB+0]
    shrdl   rIBASE, %eax
    sarl    %cl, rIBASE
    testb   $$32, %cl
    je      2f
    movl    rIBASE, %eax
    sarl    $$31, rIBASE
2:
    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[AA+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_shr_long_2addr():
/*
 * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
 * 32-bit shift distance.
 */
    /* shr-long/2addr vA, vB */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets A */
    movzbl  rINSTbl, %ecx                   # ecx <- BA
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, rINST                    # eax <- v[A+0]
    sarl    $$4, %ecx                       # ecx <- B
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[A+1]
    GET_VREG %ecx, %ecx                     # ecx <- vB
    shrdl   rIBASE, %eax
    sarl    %cl, rIBASE
    testb   $$32, %cl
    je      2f
    movl    rIBASE, %eax
    sarl    $$31, rIBASE
2:
    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[A+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_sub_int():
%  binop(instr="subl")

%def op_sub_int_2addr():
%  binop2addr(instr="subl")

%def op_sub_long():
%  binopWide(instr1="subl", instr2="sbbl")

%def op_sub_long_2addr():
%  binopWide2addr(instr1="subl", instr2="sbbl")

%def op_ushr_int():
%  binop1(instr="shrl    %cl, %eax")

%def op_ushr_int_2addr():
%  shop2addr(instr="shrl    %cl, %eax")

%def op_ushr_int_lit8():
%  binopLit8(instr="shrl    %cl, %eax")

%def op_ushr_long():
/*
 * Long integer shift.  This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
 * 6 bits of the shift distance.  x86 shifts automatically mask off
 * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
 * case specially.
 */
    /* ushr-long vAA, vBB, vCC */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets AA */
    movzbl  2(rPC), %eax                    # eax <- BB
    movzbl  3(rPC), %ecx                    # ecx <- CC
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
    GET_VREG %ecx, %ecx                     # ecx <- vCC
    GET_VREG %eax, %eax                     # eax <- v[BB+0]
    shrdl   rIBASE, %eax
    shrl    %cl, rIBASE
    testb   $$32, %cl
    je      2f
    movl    rIBASE, %eax
    xorl    rIBASE, rIBASE
2:
    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[AA+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_ushr_long_2addr():
/*
 * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
 * 32-bit shift distance.
 */
    /* ushr-long/2addr vA, vB */
    /* ecx gets shift count */
    /* Need to spill rIBASE */
    /* rINSTw gets A */
    movzbl  rINSTbl, %ecx                   # ecx <- BA
    andb    $$0xf, rINSTbl                  # rINST <- A
    GET_VREG %eax, rINST                    # eax <- v[A+0]
    sarl    $$4, %ecx                       # ecx <- B
    movl    rIBASE, LOCAL0(%esp)
    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[A+1]
    GET_VREG %ecx, %ecx                     # ecx <- vB
    shrdl   rIBASE, %eax
    shrl    %cl, rIBASE
    testb   $$32, %cl
    je      2f
    movl    rIBASE, %eax
    xorl    rIBASE, rIBASE
2:
    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
    movl    LOCAL0(%esp), rIBASE
    SET_VREG %eax, rINST                    # v[A+0] <- eax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_xor_int():
%  binop(instr="xorl")

%def op_xor_int_2addr():
%  binop2addr(instr="xorl")

%def op_xor_int_lit16():
%  binopLit16(instr="xorl    %ecx, %eax")

%def op_xor_int_lit8():
%  binopLit8(instr="xorl    %ecx, %eax")

%def op_xor_long():
%  binopWide(instr1="xorl", instr2="xorl")

%def op_xor_long_2addr():
%  binopWide2addr(instr1="xorl", instr2="xorl")