/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "slicer/instrumentation.h"
#include "slicer/dex_ir_builder.h"

namespace slicer {

namespace {

struct BytecodeConvertingVisitor : public lir::Visitor {
  lir::Bytecode* out = nullptr;
  bool Visit(lir::Bytecode* bytecode) {
    out = bytecode;
    return true;
  }
};
void BoxValue(lir::Bytecode* bytecode,
              lir::CodeIr* code_ir,
              ir::Type* type,
              dex::u4 src_reg,
              dex::u4 dst_reg) {
  bool is_wide = false;
  const char* boxed_type_name = nullptr;
  switch (*(type->descriptor)->c_str()) {
    case 'Z':
      boxed_type_name = "Ljava/lang/Boolean;";
      break;
    case 'B':
      boxed_type_name = "Ljava/lang/Byte;";
      break;
    case 'C':
      boxed_type_name = "Ljava/lang/Character;";
      break;
    case 'S':
      boxed_type_name = "Ljava/lang/Short;";
      break;
    case 'I':
      boxed_type_name = "Ljava/lang/Integer;";
      break;
    case 'J':
      is_wide = true;
      boxed_type_name = "Ljava/lang/Long;";
      break;
    case 'F':
      boxed_type_name = "Ljava/lang/Float;";
      break;
    case 'D':
      is_wide = true;
      boxed_type_name = "Ljava/lang/Double;";
      break;
  }
  SLICER_CHECK(boxed_type_name != nullptr);

  ir::Builder builder(code_ir->dex_ir);
  std::vector<ir::Type*> param_types;
  param_types.push_back(type);

  auto boxed_type = builder.GetType(boxed_type_name);
  auto ir_proto = builder.GetProto(boxed_type, builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString("valueOf"), ir_proto, boxed_type);

  auto boxing_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  auto args = code_ir->Alloc<lir::VRegRange>(src_reg, 1 + is_wide);
  auto boxing_invoke = code_ir->Alloc<lir::Bytecode>();
  boxing_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  boxing_invoke->operands.push_back(args);
  boxing_invoke->operands.push_back(boxing_method);
  code_ir->instructions.InsertBefore(bytecode, boxing_invoke);

  auto move_result = code_ir->Alloc<lir::Bytecode>();
  move_result->opcode = dex::OP_MOVE_RESULT_OBJECT;
  move_result->operands.push_back(code_ir->Alloc<lir::VReg>(dst_reg));
  code_ir->instructions.InsertBefore(bytecode, move_result);
}
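
// Illustrative only: for an int value in vS boxed into vD, the code emitted by
// BoxValue() above corresponds to the following Dalvik bytecode (register
// numbers are hypothetical):
//
//   invoke-static/range {vS..vS}, Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;
//   move-result-object vD
//
// Wide values (J/D) pass a two-register range {vS..vS+1} instead.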

}  // namespace

bool EntryHook::Apply(lir::CodeIr* code_ir) {
  lir::Bytecode* bytecode = nullptr;
  // find the first bytecode in the method body to insert the hook before it
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    bytecode = visitor.out;
    if (bytecode != nullptr) {
      break;
    }
  }
  if (bytecode == nullptr) {
    return false;
  }
  if (tweak_ == Tweak::ArrayParams) {
    return InjectArrayParamsHook(code_ir, bytecode);
  }

  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;

  // construct the hook method declaration
  std::vector<ir::Type*> param_types;
  if ((ir_method->access_flags & dex::kAccStatic) == 0) {
    ir::Type* this_argument_type;
    switch (tweak_) {
      case Tweak::ThisAsObject:
        this_argument_type = builder.GetType("Ljava/lang/Object;");
        break;
      default:
        this_argument_type = ir_method->decl->parent;
        break;
    }
    param_types.push_back(this_argument_type);
  }
  if (ir_method->decl->prototype->param_types != nullptr) {
    const auto& orig_param_types = ir_method->decl->prototype->param_types->types;
    param_types.insert(param_types.end(), orig_param_types.begin(), orig_param_types.end());
  }

  auto ir_proto = builder.GetProto(builder.GetType("V"),
                                   builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  // argument registers
  auto regs = ir_method->code->registers;
  auto args_count = ir_method->code->ins_count;
  auto args = code_ir->Alloc<lir::VRegRange>(regs - args_count, args_count);

  // invoke hook bytecode
  auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
  hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  hook_invoke->operands.push_back(args);
  hook_invoke->operands.push_back(hook_method);

  // insert the hook before the first bytecode in the method body
  code_ir->instructions.InsertBefore(bytecode, hook_invoke);
  return true;
}
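
// Illustrative only: for a non-static method LTarget;->onClick(Landroid/view/View;)V
// with 5 registers and 2 ins, and a hook method id of LTracer;->onEnter (names here
// are hypothetical), the prologue injected by EntryHook::Apply() is roughly:
//
//   invoke-static/range {v3..v4}, LTracer;->onEnter(LTarget;Landroid/view/View;)V
//
// With Tweak::ThisAsObject the first hook parameter type becomes Ljava/lang/Object;.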

void GenerateShiftParamsCode(lir::CodeIr* code_ir, lir::Instruction* position, dex::u4 shift) {
  const auto ir_method = code_ir->ir_method;
  SLICER_CHECK(ir_method->code->ins_count > 0);

  // build a param list with the explicit "this" argument for non-static methods
  std::vector<ir::Type*> param_types;
  if ((ir_method->access_flags & dex::kAccStatic) == 0) {
    param_types.push_back(ir_method->decl->parent);
  }
  if (ir_method->decl->prototype->param_types != nullptr) {
    const auto& orig_param_types = ir_method->decl->prototype->param_types->types;
    param_types.insert(param_types.end(), orig_param_types.begin(), orig_param_types.end());
  }

  const dex::u4 regs = ir_method->code->registers;
  const dex::u4 ins_count = ir_method->code->ins_count;
  SLICER_CHECK(regs >= ins_count);

  // generate the args "relocation" instructions
  dex::u4 reg = regs - ins_count;
  for (const auto& type : param_types) {
    auto move = code_ir->Alloc<lir::Bytecode>();
    switch (type->GetCategory()) {
      case ir::Type::Category::Reference:
        move->opcode = dex::OP_MOVE_OBJECT_16;
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        reg += 1;
        break;
      case ir::Type::Category::Scalar:
        move->opcode = dex::OP_MOVE_16;
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        reg += 1;
        break;
      case ir::Type::Category::WideScalar:
        move->opcode = dex::OP_MOVE_WIDE_16;
        move->operands.push_back(code_ir->Alloc<lir::VRegPair>(reg - shift));
        move->operands.push_back(code_ir->Alloc<lir::VRegPair>(reg));
        reg += 2;
        break;
      case ir::Type::Category::Void:
        SLICER_FATAL("void parameter type");
    }
    code_ir->instructions.InsertBefore(position, move);
  }
}
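
// Illustrative only: for a non-static method with params (this: LTarget;, x: I)
// arriving in v4..v5 after the register count was raised, a shift of 2 makes the
// loop above emit (register numbers are hypothetical):
//
//   move-object/16 v2, v4
//   move/16 v3, v5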

bool EntryHook::InjectArrayParamsHook(lir::CodeIr* code_ir, lir::Bytecode* bytecode) {
  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;
  auto param_types_list = ir_method->decl->prototype->param_types;
  auto param_types = param_types_list != nullptr ? param_types_list->types : std::vector<ir::Type*>();
  bool is_static = (ir_method->access_flags & dex::kAccStatic) != 0;

  bool needsBoxingReg = false;
  for (auto type : param_types) {
    needsBoxingReg |= type->GetCategory() != ir::Type::Category::Reference;
  }

  // number of registers needed by the injected code
  dex::u2 regs_count = 2 + needsBoxingReg;
  auto non_param_regs = ir_method->code->registers - ir_method->code->ins_count;

  // do we have enough non-param registers to work with?
  bool needsExtraRegs = non_param_regs < regs_count;
  if (needsExtraRegs) {
    // we don't have enough registers, so allocate more; the params
    // will be shifted back to their original registers later.
    code_ir->ir_method->code->registers += regs_count - non_param_regs;
  }

  // for now, simply use the first few registers (v0..v2)

  // register that holds the array size during allocation;
  // later reused to hold the index for each "aput"
  dex::u4 array_size_reg = 0;
  // register that holds the array passed as the
  // entry hook parameter
  dex::u4 array_reg = 1;
  // if boxing is needed, this register holds the boxed value
  dex::u4 boxing_reg = needsBoxingReg ? 2 : 0;
  // array size bytecode
  auto const_size_op = code_ir->Alloc<lir::Bytecode>();
  const_size_op->opcode = dex::OP_CONST;
  const_size_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_size_reg));
  const_size_op->operands.push_back(code_ir->Alloc<lir::Const32>(param_types.size() + !is_static));
  code_ir->instructions.InsertBefore(bytecode, const_size_op);

  // allocate the array
  const auto obj_array_type = builder.GetType("[Ljava/lang/Object;");
  auto allocate_array_op = code_ir->Alloc<lir::Bytecode>();
  allocate_array_op->opcode = dex::OP_NEW_ARRAY;
  allocate_array_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_reg));
  allocate_array_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_size_reg));
  allocate_array_op->operands.push_back(
      code_ir->Alloc<lir::Type>(obj_array_type, obj_array_type->orig_index));
  code_ir->instructions.InsertBefore(bytecode, allocate_array_op);

  // fill the array with the parameters passed to the method

  std::vector<ir::Type*> types;
  if (!is_static) {
    types.push_back(ir_method->decl->parent);
  }

  types.insert(types.end(), param_types.begin(), param_types.end());

  // register where the params start
  dex::u4 current_reg = ir_method->code->registers - ir_method->code->ins_count;
  // reuse the no-longer-needed size register to store the array index
  dex::u4 array_index_reg = array_size_reg;
  int i = 0;
  for (auto type : types) {
    dex::u4 src_reg = 0;
    if (type->GetCategory() != ir::Type::Category::Reference) {
      BoxValue(bytecode, code_ir, type, current_reg, boxing_reg);
      src_reg = boxing_reg;
      current_reg += 1 + (type->GetCategory() == ir::Type::Category::WideScalar);
    } else {
      src_reg = current_reg;
      current_reg++;
    }

    auto index_const_op = code_ir->Alloc<lir::Bytecode>();
    index_const_op->opcode = dex::OP_CONST;
    index_const_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_index_reg));
    index_const_op->operands.push_back(code_ir->Alloc<lir::Const32>(i++));
    code_ir->instructions.InsertBefore(bytecode, index_const_op);

    auto aput_op = code_ir->Alloc<lir::Bytecode>();
    aput_op->opcode = dex::OP_APUT_OBJECT;
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(src_reg));
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_reg));
    aput_op->operands.push_back(code_ir->Alloc<lir::VReg>(array_index_reg));
    code_ir->instructions.InsertBefore(bytecode, aput_op);
  }

  std::vector<ir::Type*> hook_param_types;
  hook_param_types.push_back(obj_array_type);

  auto ir_proto = builder.GetProto(builder.GetType("V"),
                                   builder.GetTypeList(hook_param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);
  auto args = code_ir->Alloc<lir::VRegRange>(array_reg, 1);
  auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
  hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
  hook_invoke->operands.push_back(args);
  hook_invoke->operands.push_back(hook_method);
  code_ir->instructions.InsertBefore(bytecode, hook_invoke);

  // clean up the registers we used: assign the marker value 0xFE_FE_FE_FE
  // (decimal -16843010) to help identify any accidental use of
  // uninitialized registers.
  for (dex::u2 i = 0; i < regs_count; ++i) {
    auto cleanup = code_ir->Alloc<lir::Bytecode>();
    cleanup->opcode = dex::OP_CONST;
    cleanup->operands.push_back(code_ir->Alloc<lir::VReg>(i));
    cleanup->operands.push_back(code_ir->Alloc<lir::Const32>(0xFEFEFEFE));
    code_ir->instructions.InsertBefore(bytecode, cleanup);
  }

  // now shift the params back to the registers the original code expects
  if (needsExtraRegs) {
    GenerateShiftParamsCode(code_ir, bytecode, regs_count - non_param_regs);
  }
  return true;
}
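
// Illustrative only: for a non-static method LTarget;->f(ILjava/lang/String;)V
// with enough free registers (names and register numbers are hypothetical), the
// ArrayParams prologue built above is roughly:
//
//   const v0, #3                           // array size: this + 2 params
//   new-array v1, v0, [Ljava/lang/Object;
//   const v0, #0
//   aput-object v<this>, v1, v0            // element 0: "this"
//   invoke-static/range {v<x>..v<x>}, Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;
//   move-result-object v2
//   const v0, #1
//   aput-object v2, v1, v0                 // element 1: the boxed int
//   const v0, #2
//   aput-object v<s>, v1, v0               // element 2: the String
//   invoke-static/range {v1..v1}, LTracer;->onEnter([Ljava/lang/Object;)V
//   const v0, #0xFEFEFEFE                  // scratch register cleanup
//   const v1, #0xFEFEFEFE
//   const v2, #0xFEFEFEFE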

bool ExitHook::Apply(lir::CodeIr* code_ir) {
  ir::Builder builder(code_ir->dex_ir);
  const auto ir_method = code_ir->ir_method;
  const auto declared_return_type = ir_method->decl->prototype->return_type;
  bool return_as_object = tweak_ == Tweak::ReturnAsObject;
  // do we have a void-return method?
  bool return_void = (::strcmp(declared_return_type->descriptor->c_str(), "V") == 0);
  // ReturnAsObject supports only object return types
  SLICER_CHECK(!return_as_object ||
               (declared_return_type->GetCategory() == ir::Type::Category::Reference));
  const auto return_type = return_as_object ? builder.GetType("Ljava/lang/Object;")
                                            : declared_return_type;

  // construct the hook method declaration
  std::vector<ir::Type*> param_types;
  if (!return_void) {
    param_types.push_back(return_type);
  }

  auto ir_proto = builder.GetProto(return_type, builder.GetTypeList(param_types));

  auto ir_method_decl = builder.GetMethodDecl(
      builder.GetAsciiString(hook_method_id_.method_name), ir_proto,
      builder.GetType(hook_method_id_.class_descriptor));

  auto hook_method = code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

  // find and instrument all return instructions
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    auto bytecode = visitor.out;
    if (bytecode == nullptr) {
      continue;
    }

    dex::Opcode move_result_opcode = dex::OP_NOP;
    dex::u4 reg = 0;
    int reg_count = 0;

    switch (bytecode->opcode) {
      case dex::OP_RETURN_VOID:
        SLICER_CHECK(return_void);
        break;
      case dex::OP_RETURN:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT;
        reg = bytecode->CastOperand<lir::VReg>(0)->reg;
        reg_count = 1;
        break;
      case dex::OP_RETURN_OBJECT:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT_OBJECT;
        reg = bytecode->CastOperand<lir::VReg>(0)->reg;
        reg_count = 1;
        break;
      case dex::OP_RETURN_WIDE:
        SLICER_CHECK(!return_void);
        move_result_opcode = dex::OP_MOVE_RESULT_WIDE;
        reg = bytecode->CastOperand<lir::VRegPair>(0)->base_reg;
        reg_count = 2;
        break;
      default:
        // skip the bytecode...
        continue;
    }

    // invoke hook bytecode
    auto args = code_ir->Alloc<lir::VRegRange>(reg, reg_count);
    auto hook_invoke = code_ir->Alloc<lir::Bytecode>();
    hook_invoke->opcode = dex::OP_INVOKE_STATIC_RANGE;
    hook_invoke->operands.push_back(args);
    hook_invoke->operands.push_back(hook_method);
    code_ir->instructions.InsertBefore(bytecode, hook_invoke);

    // move the result back to the right register
    //
    // NOTE: we're reusing the original return's operand,
    // which is valid and more efficient than allocating
    // a new LIR node, but it's also fragile: we need to be
    // very careful about mutating shared nodes.
    //
    if (move_result_opcode != dex::OP_NOP) {
      auto move_result = code_ir->Alloc<lir::Bytecode>();
      move_result->opcode = move_result_opcode;
      move_result->operands.push_back(bytecode->operands[0]);
      code_ir->instructions.InsertBefore(bytecode, move_result);

      if (tweak_ == Tweak::ReturnAsObject) {
        auto check_cast = code_ir->Alloc<lir::Bytecode>();
        check_cast->opcode = dex::OP_CHECK_CAST;
        check_cast->operands.push_back(code_ir->Alloc<lir::VReg>(reg));
        check_cast->operands.push_back(
            code_ir->Alloc<lir::Type>(declared_return_type, declared_return_type->orig_index));
        code_ir->instructions.InsertBefore(bytecode, check_cast);
      }
    }
  }

  return true;
}
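
// Illustrative only: with a hook method id of LTracer;->onExit and a method
// declared to return LFoo; (both names are hypothetical), each "return-object v2"
// is rewritten by ExitHook::Apply() above into roughly:
//
//   invoke-static/range {v2..v2}, LTracer;->onExit(LFoo;)LFoo;
//   move-result-object v2
//   return-object v2
//
// With Tweak::ReturnAsObject the hook signature uses Ljava/lang/Object; instead,
// and a check-cast back to the declared return type is inserted before the return.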

bool DetourHook::Apply(lir::CodeIr* code_ir) {
  ir::Builder builder(code_ir->dex_ir);

  // search for invoke bytecodes that GetNewOpcode() knows how to detour
  for (auto instr : code_ir->instructions) {
    BytecodeConvertingVisitor visitor;
    instr->Accept(&visitor);
    auto bytecode = visitor.out;
    if (bytecode == nullptr) {
      continue;
    }

    dex::Opcode new_call_opcode = GetNewOpcode(bytecode->opcode);
    if (new_call_opcode == dex::OP_NOP) {
      continue;
    }

    auto orig_method = bytecode->CastOperand<lir::Method>(1)->ir_method;
    if (!orig_method_id_.Match(orig_method)) {
      // this is not the method you're looking for...
      continue;
    }

    // construct the detour method declaration
    // (matching the original method, plus an explicit "this" argument)
    std::vector<ir::Type*> param_types;
    param_types.push_back(orig_method->parent);
    if (orig_method->prototype->param_types != nullptr) {
      const auto& orig_param_types = orig_method->prototype->param_types->types;
      param_types.insert(param_types.end(), orig_param_types.begin(),
                         orig_param_types.end());
    }

    auto ir_proto = builder.GetProto(orig_method->prototype->return_type,
                                     builder.GetTypeList(param_types));

    auto ir_method_decl = builder.GetMethodDecl(
        builder.GetAsciiString(detour_method_id_.method_name), ir_proto,
        builder.GetType(detour_method_id_.class_descriptor));

    auto detour_method =
        code_ir->Alloc<lir::Method>(ir_method_decl, ir_method_decl->orig_index);

    // We mutate the original invoke bytecode in-place: this is ok
    // because lir::Instructions can't be shared (referenced multiple times)
    // in the code IR. It's also simpler and more efficient than allocating a
    // new IR invoke bytecode.
    bytecode->opcode = new_call_opcode;
    bytecode->operands[1] = detour_method;
  }

  return true;
}
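
// Illustrative only: DetourVirtualInvoke rewrites a virtual call such as
//
//   invoke-virtual {v0, v1}, LBase;->foo(I)V
//
// in place into a static call to the detour method, which takes the receiver as
// an explicit first argument (class names here are hypothetical):
//
//   invoke-static {v0, v1}, LDetour;->foo(LBase;I)V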

dex::Opcode DetourVirtualInvoke::GetNewOpcode(dex::Opcode opcode) {
  switch (opcode) {
    case dex::OP_INVOKE_VIRTUAL:
      return dex::OP_INVOKE_STATIC;
    case dex::OP_INVOKE_VIRTUAL_RANGE:
      return dex::OP_INVOKE_STATIC_RANGE;
    default:
      // skip instruction ...
      return dex::OP_NOP;
  }
}

dex::Opcode DetourInterfaceInvoke::GetNewOpcode(dex::Opcode opcode) {
  switch (opcode) {
    case dex::OP_INVOKE_INTERFACE:
      return dex::OP_INVOKE_STATIC;
    case dex::OP_INVOKE_INTERFACE_RANGE:
      return dex::OP_INVOKE_STATIC_RANGE;
    default:
      // skip instruction ...
      return dex::OP_NOP;
  }
}

// Register re-numbering visitor
// (renumbers vN to vN+shift)
class RegsRenumberVisitor : public lir::Visitor {
 public:
  explicit RegsRenumberVisitor(int shift) : shift_(shift) {
    SLICER_CHECK(shift > 0);
  }

 private:
  virtual bool Visit(lir::Bytecode* bytecode) override {
    for (auto operand : bytecode->operands) {
      operand->Accept(this);
    }
    return true;
  }

  virtual bool Visit(lir::DbgInfoAnnotation* dbg_annotation) override {
    for (auto operand : dbg_annotation->operands) {
      operand->Accept(this);
    }
    return true;
  }

  virtual bool Visit(lir::VReg* vreg) override {
    vreg->reg += shift_;
    return true;
  }

  virtual bool Visit(lir::VRegPair* vreg_pair) override {
    vreg_pair->base_reg += shift_;
    return true;
  }

  virtual bool Visit(lir::VRegList* vreg_list) override {
    for (auto& reg : vreg_list->registers) {
      reg += shift_;
    }
    return true;
  }

  virtual bool Visit(lir::VRegRange* vreg_range) override {
    vreg_range->base_reg += shift_;
    return true;
  }

 private:
  int shift_ = 0;
};

// Try to allocate registers by renumbering the existing allocation
//
// NOTE: we can't bump the register count over 16 since it may
// make existing bytecodes "unencodable" (if they have 4 bit reg fields)
//
void AllocateScratchRegs::RegsRenumbering(lir::CodeIr* code_ir) {
  SLICER_CHECK(left_to_allocate_ > 0);
  int delta = std::min(left_to_allocate_,
                       16 - static_cast<int>(code_ir->ir_method->code->registers));
  if (delta < 1) {
    // can't allocate any registers through renumbering
    return;
  }
  assert(delta <= 16);

  // renumber existing registers
  RegsRenumberVisitor visitor(delta);
  for (auto instr : code_ir->instructions) {
    instr->Accept(&visitor);
  }

  // we just allocated "delta" registers (v0..vX)
  Allocate(code_ir, 0, delta);
}

// Allocates registers by generating prologue code to relocate params
// into their original registers (parameters are allocated in the last IN registers)
//
// There are three types of register moves depending on the value type:
// 1. vreg -> vreg
// 2. vreg/wide -> vreg/wide
// 3. vreg/obj -> vreg/obj
//
void AllocateScratchRegs::ShiftParams(lir::CodeIr* code_ir) {
  const auto ir_method = code_ir->ir_method;
  SLICER_CHECK(left_to_allocate_ > 0);

  const dex::u4 shift = left_to_allocate_;
  Allocate(code_ir, ir_method->code->registers, left_to_allocate_);
  assert(left_to_allocate_ == 0);

  // generate the args "relocation" instructions
  auto first_instr = *(code_ir->instructions.begin());
  GenerateShiftParamsCode(code_ir, first_instr, shift);
}
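
// Illustrative only: for a method with 4 registers and 2 ins (params in v2..v3),
// allocating 2 scratch registers via ShiftParams() raises the register count to 6,
// so the params now arrive in v4..v5; the generated prologue
//
//   move/16 v2, v4
//   move/16 v3, v5
//
// moves them back to where the original code expects them, leaving v4..v5 free as
// scratch registers. (The move opcodes depend on the param types; register numbers
// are hypothetical.)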

// Mark [first_reg, first_reg + count) as scratch registers
void AllocateScratchRegs::Allocate(lir::CodeIr* code_ir, dex::u4 first_reg, int count) {
  SLICER_CHECK(count > 0 && count <= left_to_allocate_);
  code_ir->ir_method->code->registers += count;
  left_to_allocate_ -= count;
  for (int i = 0; i < count; ++i) {
    SLICER_CHECK(scratch_regs_.insert(first_reg + i).second);
  }
}

// Allocate scratch registers without doing a full register allocation:
//
// 1. if there are no params, increase the method regs count and we're done
// 2. if the method uses fewer than 16 registers, we can renumber the existing registers
// 3. if we still have registers to allocate, increase the method registers count,
//    and generate prologue code to shift the param regs into their original registers
//
bool AllocateScratchRegs::Apply(lir::CodeIr* code_ir) {
  const auto code = code_ir->ir_method->code;
  // .dex bytecode allows up to 64k vregs
  SLICER_CHECK(code->registers + allocate_count_ <= (1 << 16));

  scratch_regs_.clear();
  left_to_allocate_ = allocate_count_;

  // can we allocate by simply incrementing the method regs count?
  if (code->ins_count == 0) {
    Allocate(code_ir, code->registers, left_to_allocate_);
    return true;
  }

  // allocate as many registers as possible using renumbering
  if (allow_renumbering_) {
    RegsRenumbering(code_ir);
  }

  // if we still have registers to allocate, generate prologue
  // code to shift the params into their original registers
  if (left_to_allocate_ > 0) {
    ShiftParams(code_ir);
  }

  assert(left_to_allocate_ == 0);
  assert(scratch_regs_.size() == size_t(allocate_count_));
  return true;
}

bool MethodInstrumenter::InstrumentMethod(ir::EncodedMethod* ir_method) {
  SLICER_CHECK(ir_method != nullptr);
  if (ir_method->code == nullptr) {
    // can't instrument abstract methods
    return false;
  }

  // apply all the queued transformations
  lir::CodeIr code_ir(ir_method, dex_ir_);
  for (const auto& transformation : transformations_) {
    if (!transformation->Apply(&code_ir)) {
      // the transformation failed, bail out...
      return false;
    }
  }
  code_ir.Assemble();
  return true;
}

bool MethodInstrumenter::InstrumentMethod(const ir::MethodId& method_id) {
  // locate the method to be instrumented
  ir::Builder builder(dex_ir_);
  auto ir_method = builder.FindMethod(method_id);
  if (ir_method == nullptr) {
    // we couldn't find the specified method
    return false;
  }
  return InstrumentMethod(ir_method);
}
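
// A minimal usage sketch (assuming the MethodInstrumenter, EntryHook and ExitHook
// declarations from slicer/instrumentation.h; the tracer/target class and method
// names below are hypothetical):
//
//   slicer::MethodInstrumenter mi(dex_ir);
//   mi.AddTransformation<slicer::EntryHook>(ir::MethodId("LTracer;", "onEnter"));
//   mi.AddTransformation<slicer::ExitHook>(ir::MethodId("LTracer;", "onExit"));
//   SLICER_CHECK(mi.InstrumentMethod(
//       ir::MethodId("LTarget;", "foo", "(ILjava/lang/String;)Ljava/lang/String;")));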

}  // namespace slicer