Lines Matching refs:thunk_offset

222 bool CheckThunk(uint32_t thunk_offset) { in CheckThunk() argument
224 if (output_.size() < thunk_offset + expected_code.size()) { in CheckThunk()
226 << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size()); in CheckThunk()
229 ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size()); in CheckThunk()
393 uint32_t thunk_offset = in TestNopsAdrpInsn2AndUseHasThunk() local
395 uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u); in TestNopsAdrpInsn2AndUseHasThunk()
409 auto expected_thunk_code = GenNopsAndAdrpLdr(0u, thunk_offset, target_offset); in TestNopsAdrpInsn2AndUseHasThunk()
416 ASSERT_EQ(thunk_offset + thunk_size, output_.size()); in TestNopsAdrpInsn2AndUseHasThunk()
418 ArrayRef<const uint8_t> thunk_code(&output_[thunk_offset], thunk_size); in TestNopsAdrpInsn2AndUseHasThunk()
654 uint32_t thunk_offset = in TEST_F() local
656 uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); in TEST_F()
662 EXPECT_TRUE(CheckThunk(thunk_offset)); in TEST_F()
750 uint32_t thunk_offset = RoundDown(method_after_thunk_header_offset - thunk_size, kArm64Alignment); in TEST_F() local
751 DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size), in TEST_F()
753 ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset)); in TEST_F()
754 uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1); in TEST_F()
759 CheckThunk(thunk_offset); in TEST_F()
782 uint32_t thunk_offset = in TEST_F() local
784 uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method); in TEST_F()
790 EXPECT_TRUE(CheckThunk(thunk_offset)); in TEST_F()
1018 uint32_t thunk_offset = CompiledCode::AlignCode(method_offset + kCallCode.size(), in TEST_F() local
1020 uint32_t diff = thunk_offset - method_offset; in TEST_F()
1034 ASSERT_LE(8u, output_.size() - thunk_offset); in TEST_F()
1035 EXPECT_EQ(ldr_ip0_tr_offset, GetOutputInsn(thunk_offset)); in TEST_F()
1036 EXPECT_EQ(br_ip0, GetOutputInsn(thunk_offset + 4u)); in TEST_F()
1068 uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment); in TestBakerField() local
1073 uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset); in TestBakerField()
1082 ASSERT_GT(output_.size(), thunk_offset); in TestBakerField()
1083 ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); in TestBakerField()
1084 ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset, in TestBakerField()
1091 size_t gray_check_offset = thunk_offset; in TestBakerField()
1095 ASSERT_EQ(0x34000000u | holder_reg, GetOutputInsn(thunk_offset) & 0xff00001fu); in TestBakerField()
1121 thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment); in TestBakerField()
1318 uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment); in TEST_F() local
1322 uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset); in TEST_F()
1329 ASSERT_GT(output_.size(), thunk_offset); in TEST_F()
1330 ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); in TEST_F()
1331 ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset, in TEST_F()
1341 ASSERT_GE(output_.size() - thunk_offset, 4u * kGrayCheckInsns); in TEST_F()
1351 EXPECT_EQ(load_lock_word, GetOutputInsn(thunk_offset)); in TEST_F()
1355 EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(thunk_offset + 4u) & 0xfff8001fu); in TEST_F()
1362 EXPECT_EQ(fake_dependency, GetOutputInsn(thunk_offset + 12u)); in TEST_F()
1366 thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment); in TEST_F()
1395 uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment); in TEST_F() local
1399 uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset); in TEST_F()
1407 ASSERT_GT(output_.size(), thunk_offset); in TEST_F()
1408 ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); in TEST_F()
1409 ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset, in TEST_F()
1417 ASSERT_GE(output_.size() - thunk_offset, 4u); in TEST_F()
1418 ASSERT_EQ(0x34000000u | root_reg, GetOutputInsn(thunk_offset) & 0xff00001fu); in TEST_F()
1422 thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment); in TEST_F()
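The matches above keep repeating the same arithmetic: the thunk is placed at the next kArm64Alignment boundary after a method's code (RoundUp / RoundDown / AlignCode), and the branch displacement is then thunk_offset minus the address of the branch instruction itself. The following stand-alone sketch illustrates that calculation; it is not taken from the test file. The alignment value, the example offsets, and the BL encoding helper are assumptions (kArm64Alignment is 16 bytes in ART, and the A64 BL encoding is 0x94000000 with a signed 26-bit word offset), and RoundUp is redefined locally rather than pulled from the ART headers.

// Minimal sketch (not ART code) of the thunk-offset and BL-displacement
// arithmetic that the matches above repeat. All concrete numbers below are
// hypothetical; only the formulas mirror the test code.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint32_t kArm64Alignment = 16u;  // assumed thunk alignment

// Round `value` up to a power-of-two `alignment` (local stand-in for ART's RoundUp).
constexpr uint32_t RoundUp(uint32_t value, uint32_t alignment) {
  return (value + alignment - 1u) & ~(alignment - 1u);
}

// Encode an A64 BL with a signed displacement in bytes (imm26 holds the word offset).
constexpr uint32_t EncodeBl(int32_t displacement) {
  return 0x94000000u | ((static_cast<uint32_t>(displacement) >> 2) & 0x03ffffffu);
}

}  // namespace

int main() {
  // Hypothetical layout: one method at offset 0x1000 whose code is 0x100 bytes,
  // with a BL at byte 0x20 inside it that needs to be routed through a thunk.
  const uint32_t method_offset = 0x1000u;
  const uint32_t method_code_size = 0x100u;
  const uint32_t bl_offset_in_method = 0x20u;

  // Thunk goes after the method code, rounded up to the required alignment,
  // mirroring `GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment)`.
  const uint32_t thunk_offset = method_offset + RoundUp(method_code_size, kArm64Alignment);

  // Displacement from the BL instruction to the thunk, mirroring
  // `diff = thunk_offset - (method_offset + bl_offset_in_method)`.
  const uint32_t diff = thunk_offset - (method_offset + bl_offset_in_method);
  assert((diff & 3u) == 0u);  // BL displacements are word-aligned

  std::printf("thunk_offset=0x%x diff=0x%x bl=0x%08x\n",
              thunk_offset, diff, EncodeBl(static_cast<int32_t>(diff)));
  return 0;
}

The same pattern explains the checks in the listing: CheckThunk compares output_ starting at thunk_offset against the expected thunk code, and the cbnz_offset / b_diff values are just this displacement recomputed for CBNZ and B instructions instead of BL.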