Merge "ART: Add GetLineNumberTable"
diff --git a/build/art.go b/build/art.go
index ccaa11d..0af1767 100644
--- a/build/art.go
+++ b/build/art.go
@@ -259,7 +259,7 @@
}
func artLibrary() (blueprint.Module, []interface{}) {
- library, _ := cc.NewLibrary(android.HostAndDeviceSupported, true, true)
+ library, _ := cc.NewLibrary(android.HostAndDeviceSupported)
module, props := library.Init()
props = installCodegenCustomizer(module, props, true)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index db55ea0..2eb6fba 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -145,6 +145,7 @@
mips64: {
srcs: [
"jni/quick/mips64/calling_convention_mips64.cc",
+ "linker/mips64/relative_patcher_mips64.cc",
"optimizing/code_generator_mips64.cc",
"optimizing/intrinsics_mips64.cc",
"utils/mips64/assembler_mips64.cc",
@@ -381,6 +382,11 @@
"linker/mips/relative_patcher_mips32r6_test.cc",
],
},
+ mips64: {
+ srcs: [
+ "linker/mips64/relative_patcher_mips64_test.cc",
+ ],
+ },
x86: {
srcs: [
"linker/x86/relative_patcher_x86_test.cc",
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 1bdace9..a5979cc 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -218,35 +218,18 @@
for (; inst < end; inst = inst->Next()) {
Instruction::Code code = inst->Opcode();
- if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) {
+ if (code == Instruction::CHECK_CAST) {
uint32_t dex_pc = inst->GetDexPc(code_item->insns_);
if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) {
        // Do not attempt to quicken this instruction; it's unreachable anyway.
continue;
}
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- bool is_safe_cast = false;
- if (code == Instruction::CHECK_CAST) {
- const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
- inst->VRegA_21c()));
- const verifier::RegType& cast_type =
- method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
- is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier);
- } else {
- const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
- inst->VRegB_23x()));
- // We only know its safe to assign to an array if the array type is precise. For example,
- // an Object[] can have any type of object stored in it, but it may also be assigned a
- // String[] in which case the stores need to be of Strings.
- if (array_type.IsPreciseReference()) {
- const verifier::RegType& value_type(line->GetRegisterType(method_verifier,
- inst->VRegA_23x()));
- const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
- ->GetComponentType(array_type, method_verifier->GetClassLoader());
- is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type, method_verifier);
- }
- }
- if (is_safe_cast) {
+ const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
+ inst->VRegA_21c()));
+ const verifier::RegType& cast_type =
+ method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
+ if (cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier)) {
// Verify ordering for push_back() to the sorted vector.
DCHECK(safe_cast_set_.empty() || safe_cast_set_.back() < dex_pc);
safe_cast_set_.push_back(dex_pc);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e62bdb5..1b1de78 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -299,7 +299,7 @@
dump_passes_(dump_passes),
timings_logger_(timer),
compiler_context_(nullptr),
- support_boot_image_fixup_(instruction_set != kMips64),
+ support_boot_image_fixup_(true),
dex_files_for_oat_file_(nullptr),
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c537483..cc7df1c 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -27,6 +27,7 @@
#include <string>
#include <ostream>
+#include "art_method.h"
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index a16aaca..4f9a3a0 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -29,10 +29,10 @@
Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
protected:
- static const uint8_t UnpatchedPcRelativeRawCode[];
- static const uint32_t LiteralOffset;
- static const uint32_t AnchorOffset;
- static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
@@ -45,24 +45,25 @@
void TestStringReference(uint32_t string_offset);
};
-const uint8_t Mips32r6RelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+const uint8_t Mips32r6RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
};
-const uint32_t Mips32r6RelativePatcherTest::LiteralOffset = 0; // At auipc (where patching starts).
-const uint32_t Mips32r6RelativePatcherTest::AnchorOffset = 0; // At auipc (where PC+0 points).
-const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::UnpatchedPcRelativeCode(
- UnpatchedPcRelativeRawCode);
+const uint32_t Mips32r6RelativePatcherTest::kLiteralOffset = 0;  // At auipc (patching starts).
+const uint32_t Mips32r6RelativePatcherTest::kAnchorOffset = 0; // At auipc (where PC+0 points).
+const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
void Mips32r6RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
uint32_t target_offset) {
- AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = target_offset - (result.second + AnchorOffset);
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
@@ -79,7 +80,7 @@
uint32_t element_offset) {
dex_cache_arrays_begin_ = dex_cache_arrays_begin;
LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
dex_cache_arrays_begin_ + element_offset);
@@ -89,7 +90,7 @@
constexpr uint32_t kStringIndex = 1u;
string_index_to_offset_map_.Put(kStringIndex, string_offset);
LinkerPatch patches[] = {
- LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ LinkerPatch::RelativeStringPatch(kLiteralOffset, nullptr, kAnchorOffset, kStringIndex)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
}
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index 335ce2e..faeb92a 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -29,10 +29,10 @@
MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
protected:
- static const uint8_t UnpatchedPcRelativeRawCode[];
- static const uint32_t LiteralOffset;
- static const uint32_t AnchorOffset;
- static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
@@ -45,26 +45,26 @@
void TestStringReference(uint32_t string_offset);
};
-const uint8_t MipsRelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+const uint8_t MipsRelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x00, 0x00, 0x10, 0x04, // nal
0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
};
-const uint32_t MipsRelativePatcherTest::LiteralOffset = 4; // At lui (where patching starts).
-const uint32_t MipsRelativePatcherTest::AnchorOffset = 8; // At ori (where PC+0 points).
-const ArrayRef<const uint8_t> MipsRelativePatcherTest::UnpatchedPcRelativeCode(
- UnpatchedPcRelativeRawCode);
+const uint32_t MipsRelativePatcherTest::kLiteralOffset = 4; // At lui (where patching starts).
+const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At ori (where PC+0 points).
+const ArrayRef<const uint8_t> MipsRelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
uint32_t target_offset) {
- AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = target_offset - (result.second + AnchorOffset);
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
@@ -82,7 +82,7 @@
uint32_t element_offset) {
dex_cache_arrays_begin_ = dex_cache_arrays_begin;
LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
dex_cache_arrays_begin_ + element_offset);
@@ -92,7 +92,7 @@
constexpr uint32_t kStringIndex = 1u;
string_index_to_offset_map_.Put(kStringIndex, string_offset);
LinkerPatch patches[] = {
- LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ LinkerPatch::RelativeStringPatch(kLiteralOffset, nullptr, kAnchorOffset, kStringIndex)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
}
diff --git a/compiler/linker/mips64/relative_patcher_mips64.cc b/compiler/linker/mips64/relative_patcher_mips64.cc
new file mode 100644
index 0000000..c479716
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/mips64/relative_patcher_mips64.h"
+
+#include "compiled_method.h"
+
+namespace art {
+namespace linker {
+
+uint32_t Mips64RelativePatcher::ReserveSpace(
+ uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
+ MethodReference method_ref ATTRIBUTE_UNUSED) {
+ return offset; // No space reserved; no limit on relative call distance.
+}
+
+uint32_t Mips64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
+ return offset; // No space reserved; no limit on relative call distance.
+}
+
+uint32_t Mips64RelativePatcher::WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) {
+ return offset; // No thunks added; no limit on relative call distance.
+}
+
+void Mips64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+ uint32_t literal_offset,
+ uint32_t patch_offset,
+ uint32_t target_offset) {
+ // Basic sanity checks.
+ DCHECK_GE(code->size(), 8u);
+ DCHECK_LE(literal_offset, code->size() - 8u);
+ // auipc reg, offset_high
+ DCHECK_EQ((*code)[literal_offset + 0], 0x34);
+ DCHECK_EQ((*code)[literal_offset + 1], 0x12);
+ DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
+ DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
+ // jialc reg, offset_low
+ DCHECK_EQ((*code)[literal_offset + 4], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 5], 0x56);
+ DCHECK_EQ(((*code)[literal_offset + 6] & 0xE0), 0x00);
+ DCHECK_EQ((*code)[literal_offset + 7], 0xF8);
+
+ // Apply patch.
+ uint32_t diff = target_offset - patch_offset;
+ // Note that a combination of auipc with an instruction that adds a sign-extended
+ // 16-bit immediate operand (e.g. jialc) provides a PC-relative range of
+ // PC-0x80000000 to PC+0x7FFF7FFF on MIPS64, that is, short of 2GB on one end
+ // by 32KB.
+ diff += (diff & 0x8000) << 1; // Account for sign extension in jialc.
+
+ // auipc reg, offset_high
+ (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
+ (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
+ // jialc reg, offset_low
+ (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+}
+
+void Mips64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
+ const LinkerPatch& patch,
+ uint32_t patch_offset,
+ uint32_t target_offset) {
+ uint32_t anchor_literal_offset = patch.PcInsnOffset();
+ uint32_t literal_offset = patch.LiteralOffset();
+
+ // Basic sanity checks.
+ DCHECK_GE(code->size(), 8u);
+ DCHECK_LE(literal_offset, code->size() - 8u);
+ DCHECK_EQ(literal_offset, anchor_literal_offset);
+ // auipc reg, offset_high
+ DCHECK_EQ((*code)[literal_offset + 0], 0x34);
+ DCHECK_EQ((*code)[literal_offset + 1], 0x12);
+ DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
+ DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
+ // instr reg(s), offset_low
+ DCHECK_EQ((*code)[literal_offset + 4], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 5], 0x56);
+
+ // Apply patch.
+ uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
+ uint32_t diff = target_offset - anchor_offset;
+ // Note that a combination of auipc with an instruction that adds a sign-extended
+ // 16-bit immediate operand (e.g. ld) provides a PC-relative range of
+ // PC-0x80000000 to PC+0x7FFF7FFF on MIPS64, that is, short of 2GB on one end
+ // by 32KB.
+ diff += (diff & 0x8000) << 1; // Account for sign extension in instruction following auipc.
+
+ // auipc reg, offset_high
+ (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
+ (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
+ // instr reg(s), offset_low
+ (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+}
+
+} // namespace linker
+} // namespace art
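
The `diff += (diff & 0x8000) << 1;` adjustment used by both routines above is easy to
sanity-check in isolation. A minimal standalone C++ sketch (not part of the patch; the
example offset is arbitrary) showing that auipc's high half plus the consumer's
sign-extended low half reassemble the original diff:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t diff = 0x00019ABCu;                      // Low half >= 0x8000 needs the carry.
      uint32_t adjusted = diff + ((diff & 0x8000u) << 1);
      int32_t high = static_cast<int32_t>(adjusted & 0xFFFF0000u);  // auipc contributes this.
      int32_t low = static_cast<int16_t>(adjusted & 0xFFFFu);       // jialc/ld sign-extend this.
      assert(static_cast<uint32_t>(high + low) == diff);            // 0x20000 - 0x6544 == 0x19ABC
      return 0;
    }
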
diff --git a/compiler/linker/mips64/relative_patcher_mips64.h b/compiler/linker/mips64/relative_patcher_mips64.h
new file mode 100644
index 0000000..8ef8ceb
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
+#define ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
+
+#include "linker/relative_patcher.h"
+
+namespace art {
+namespace linker {
+
+class Mips64RelativePatcher FINAL : public RelativePatcher {
+ public:
+ explicit Mips64RelativePatcher() {}
+
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method,
+ MethodReference method_ref) OVERRIDE;
+ uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ void PatchCall(std::vector<uint8_t>* code,
+ uint32_t literal_offset,
+ uint32_t patch_offset,
+ uint32_t target_offset) OVERRIDE;
+ void PatchPcRelativeReference(std::vector<uint8_t>* code,
+ const LinkerPatch& patch,
+ uint32_t patch_offset,
+ uint32_t target_offset) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
+};
+
+} // namespace linker
+} // namespace art
+
+#endif // ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc
new file mode 100644
index 0000000..9e37f6b
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/relative_patcher_test.h"
+#include "linker/mips64/relative_patcher_mips64.h"
+
+namespace art {
+namespace linker {
+
+class Mips64RelativePatcherTest : public RelativePatcherTest {
+ public:
+ Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {}
+
+ protected:
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint8_t kUnpatchedPcRelativeCallRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCallCode;
+
+ uint32_t GetMethodOffset(uint32_t method_idx) {
+ auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
+ CHECK(result.first);
+ return result.second;
+ }
+
+ void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+ void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset);
+ void TestStringReference(uint32_t string_offset);
+};
+
+const uint8_t Mips64RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
+ 0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
+};
+const uint8_t Mips64RelativePatcherTest::kUnpatchedPcRelativeCallRawCode[] = {
+ 0x34, 0x12, 0x3E, 0xEC, // auipc at, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x01, 0xF8, // jialc at, low(diff); placeholder = 0x5678
+};
+const uint32_t Mips64RelativePatcherTest::kLiteralOffset = 0; // At auipc (where patching starts).
+const uint32_t Mips64RelativePatcherTest::kAnchorOffset = 0; // At auipc (where PC+0 points).
+const ArrayRef<const uint8_t> Mips64RelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
+const ArrayRef<const uint8_t> Mips64RelativePatcherTest::kUnpatchedPcRelativeCallCode(
+ kUnpatchedPcRelativeCallRawCode);
+
+void Mips64RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
+ uint32_t target_offset) {
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ Link();
+
+ auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
+ ASSERT_TRUE(result.first);
+
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
+ diff += (diff & 0x8000) << 1; // Account for sign extension in instruction following auipc.
+
+ const uint8_t expected_code[] = {
+ static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x5E, 0xEE,
+ static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
+
+void Mips64RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin,
+ uint32_t element_offset) {
+ dex_cache_arrays_begin_ = dex_cache_arrays_begin;
+ LinkerPatch patches[] = {
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
+ dex_cache_arrays_begin_ + element_offset);
+}
+
+TEST_F(Mips64RelativePatcherTest, DexCacheReference) {
+ TestDexCacheReference(/* dex_cache_arrays_begin */ 0x12345678, /* element_offset */ 0x1234);
+}
+
+TEST_F(Mips64RelativePatcherTest, CallOther) {
+ LinkerPatch method1_patches[] = {
+ LinkerPatch::RelativeCodePatch(kLiteralOffset, nullptr, 2u),
+ };
+ AddCompiledMethod(MethodRef(1u),
+ kUnpatchedPcRelativeCallCode,
+ ArrayRef<const LinkerPatch>(method1_patches));
+ LinkerPatch method2_patches[] = {
+ LinkerPatch::RelativeCodePatch(kLiteralOffset, nullptr, 1u),
+ };
+ AddCompiledMethod(MethodRef(2u),
+ kUnpatchedPcRelativeCallCode,
+ ArrayRef<const LinkerPatch>(method2_patches));
+ Link();
+
+ uint32_t method1_offset = GetMethodOffset(1u);
+ uint32_t method2_offset = GetMethodOffset(2u);
+ uint32_t diff_after = method2_offset - (method1_offset + kAnchorOffset /* PC adjustment */);
+ diff_after += (diff_after & 0x8000) << 1; // Account for sign extension in jialc.
+ static const uint8_t method1_expected_code[] = {
+ static_cast<uint8_t>(diff_after >> 16), static_cast<uint8_t>(diff_after >> 24), 0x3E, 0xEC,
+ static_cast<uint8_t>(diff_after), static_cast<uint8_t>(diff_after >> 8), 0x01, 0xF8,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
+ uint32_t diff_before = method1_offset - (method2_offset + kAnchorOffset /* PC adjustment */);
+ diff_before += (diff_before & 0x8000) << 1; // Account for sign extension in jialc.
+ static const uint8_t method2_expected_code[] = {
+ static_cast<uint8_t>(diff_before >> 16), static_cast<uint8_t>(diff_before >> 24), 0x3E, 0xEC,
+ static_cast<uint8_t>(diff_before), static_cast<uint8_t>(diff_before >> 8), 0x01, 0xF8,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
+}
+
+} // namespace linker
+} // namespace art
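
A worked byte-level example of what CheckPcRelativePatch() verifies (hypothetical numbers,
consistent with the sketch after the patcher above): if the linked, sign-extension-adjusted
diff comes out as 0x00029ABC, the little-endian expected bytes become:

    // auipc s2, 0x0002:       0x02, 0x00, 0x5E, 0xEE  (bytes 0-1 = diff >> 16, diff >> 24)
    // addiu s2, s2, 0x9ABC:   0xBC, 0x9A, 0x52, 0x26  (bytes 4-5 = diff >> 0, diff >> 8)
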
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 7765594..f1538b1 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -25,6 +25,9 @@
#ifdef ART_ENABLE_CODEGEN_mips
#include "linker/mips/relative_patcher_mips.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "linker/mips64/relative_patcher_mips64.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "linker/x86/relative_patcher_x86.h"
#endif
@@ -103,6 +106,10 @@
return std::unique_ptr<RelativePatcher>(
new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+ case kMips64:
+ return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
+#endif
default:
return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 2c6df38..55f3c3c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -57,6 +57,9 @@
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
using RegisterList = vixl32::RegisterList;
static bool ExpectedPairLayout(Location location) {
@@ -843,9 +846,9 @@
__ Subs(tmp, tmp, expected);
{
- AssemblerAccurateScope aas(arm_codegen->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(arm_codegen->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(ne);
__ clrex(ne);
@@ -1261,9 +1264,9 @@
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated a jump table of the right size, using
// codegen->GetVIXLAssembler()->GetBuffer().Align();
- AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
- num_entries * sizeof(int32_t),
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(codegen->GetVIXLAssembler(),
+ num_entries * sizeof(int32_t),
+ CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Check that using lower case bind is fine here.
codegen->GetVIXLAssembler()->bind(&table_start_);
for (uint32_t i = 0; i < num_entries; i++) {
@@ -1377,9 +1380,9 @@
vixl32::Register temp = temps.Acquire();
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
// The load must immediately precede RecordPcInfo.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(temp, MemOperand(temp));
RecordPcInfo(nullptr, 0);
}
@@ -1637,9 +1640,9 @@
__ Ldr(lr, MemOperand(tr, GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value()));
// Ensure the pc position is recorded immediately after the `blx` instruction.
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -2082,9 +2085,9 @@
__ Cmp(InputRegisterAt(cond, 0),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ite(ARMCondition(cond->GetCondition()));
__ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
__ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
@@ -2370,9 +2373,9 @@
// Ensure the pc position is recorded immediately after the `ldr` instruction.
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp = receiver->klass_
__ ldr(temp, MemOperand(RegisterFrom(receiver), class_offset));
codegen_->MaybeRecordImplicitNullCheck(invoke);
@@ -2418,7 +2421,7 @@
{
// Ensure the pc position is recorded immediately after the `blx` instruction.
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
+ ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
// LR();
@@ -3793,9 +3796,9 @@
// If the shift is > 32 bits, override the high part
__ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsl(pl, o_h, low, temp);
}
@@ -3812,9 +3815,9 @@
// If the shift is > 32 bits, override the low part
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ asr(pl, o_l, high, temp);
}
@@ -3829,9 +3832,9 @@
__ Orr(o_l, o_l, temp);
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsr(pl, o_l, high, temp);
}
@@ -3948,9 +3951,9 @@
GetAssembler()->LoadFromOffset(kLoadWord, temp, tr, QUICK_ENTRY_POINT(pNewEmptyString));
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, code_offset.Int32Value());
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
@@ -4192,9 +4195,9 @@
__ Bind(&fail);
{
// Ensure the pc position is recorded immediately after the `ldrexd` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// We need a load followed by store. (The address used in a STREX instruction must
// be the same as the address in the most recently executed LDREX instruction.)
__ ldrexd(temp1, temp2, MemOperand(addr));
@@ -4715,9 +4718,9 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
// Ensure the pc position is recorded immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(temps.Acquire(), MemOperand(InputRegisterAt(instruction, 0)));
RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -5233,9 +5236,9 @@
{
// Ensure we record the pc position immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp1 = array->klass_
__ ldr(temp1, MemOperand(array, class_offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5384,9 +5387,9 @@
vixl32::Register obj = InputRegisterAt(instruction, 0);
vixl32::Register out = OutputRegister(instruction);
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(out, MemOperand(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -7351,9 +7354,9 @@
relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
invoke->GetTargetMethod().dex_method_index);
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ bind(&relative_call_patches_.back().label);
// Arbitrarily branch to the BL itself, override at link time.
__ bl(&relative_call_patches_.back().label);
@@ -7365,9 +7368,9 @@
// LR()
{
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
}
break;
@@ -7380,9 +7383,9 @@
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
{
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
// LR()
__ blx(lr);
}
@@ -7406,9 +7409,9 @@
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
{
// Make sure the pc is recorded immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp = receiver->klass_
__ ldr(temp, MemOperand(receiver, class_offset));
MaybeRecordImplicitNullCheck(invoke);
@@ -7433,9 +7436,9 @@
// `RecordPcInfo()` immediately following record the correct pc. Use a scope to help guarantee
// that.
    // blx in T32 has only a 16-bit encoding, which is why a stricter check is used for the scope.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
}
@@ -7702,9 +7705,9 @@
void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
vixl32::Register out) {
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Think about using mov instead of movw.
__ bind(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
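
Every hunk in this file is the same mechanical rename: VIXL's AssemblerAccurateScope became
ExactAssemblyScope. A minimal sketch of the pattern (scope arguments as in the diff; `base`,
`off`, `temp`, `instruction` and `dex_pc` are illustrative) showing why the scope matters
wherever a pc must be recorded immediately after an instruction:

    {
      ExactAssemblyScope aas(GetVIXLAssembler(),
                             vixl32::kMaxInstructionSizeInBytes,
                             CodeBufferCheckScope::kMaximumSize);
      // Inside the scope the macro assembler must emit exactly what is written,
      // with no interleaved veneers or literal pools.
      __ ldr(temp, MemOperand(base, off));
      RecordPcInfo(instruction, dex_pc);  // The recorded pc is exactly past the ldr.
    }
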
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 5ec3da4..93ea601 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -120,7 +120,7 @@
bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
for (uint32_t i = 0; i < num_entries; i++) {
- IntLiteral *lit = new IntLiteral(0);
+ IntLiteral *lit = new IntLiteral(0, vixl32::RawLiteral::kManuallyPlaced);
bb_addresses_.emplace_back(lit);
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index cae4161..456c5c6 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -757,6 +757,11 @@
if (RequiresCurrentMethod()) {
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+    // Initialize should_deoptimize flag to 0.
+ __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorMIPS::GenerateFrameExit() {
@@ -4689,14 +4694,17 @@
}
}
-void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<Register>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
}
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
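
The two should-deoptimize hunks above are halves of one contract. A condensed sketch (the
register name `out_reg` is illustrative) of how the should_deoptimize flag flows through
the frame:

    // GenerateFrameEntry(): clear the flag when the frame is built.
    __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
    // VisitShouldDeoptimizeFlag(): surface it in a register so later code
    // (e.g. a deoptimization guard) can test it.
    __ LoadFromOffset(kLoadWord, out_reg, SP, codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
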
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b1f9b1d..44d3759 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -18,6 +18,7 @@
#include "art_method.h"
#include "code_generator_utils.h"
+#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
@@ -399,7 +400,15 @@
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
assembler_(graph->GetArena()),
- isa_features_(isa_features) {
+ isa_features_(isa_features),
+ uint64_literals_(std::less<uint64_t>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ call_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -510,8 +519,6 @@
RecordPcInfo(nullptr, 0);
}
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
-
if (HasEmptyFrame()) {
return;
}
@@ -562,13 +569,16 @@
"kCurrentMethodStackOffset must fit into int16_t");
__ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorMIPS64::GenerateFrameExit() {
__ cfi().RememberState();
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
-
if (!HasEmptyFrame()) {
// Deallocate the rest of the frame.
@@ -878,6 +888,103 @@
}
}
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t offset_or_index = info.offset_or_index;
+ DCHECK(info.pc_rel_label.IsBound());
+ uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
+ linker_patches->push_back(Factory(pc_rel_offset, &dex_file, pc_rel_offset, offset_or_index));
+ }
+}
+
+void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
+ DCHECK(linker_patches->empty());
+ size_t size =
+ method_patches_.size() +
+ call_patches_.size() +
+ pc_relative_dex_cache_patches_.size() +
+ relative_call_patches_.size();
+ linker_patches->reserve(size);
+ for (const auto& entry : method_patches_) {
+ const MethodReference& target_method = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
+ target_method.dex_file,
+ target_method.dex_method_index));
+ }
+ for (const auto& entry : call_patches_) {
+ const MethodReference& target_method = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
+ target_method.dex_file,
+ target_method.dex_method_index));
+ }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
+ for (const PcRelativePatchInfo& info : relative_call_patches_) {
+ const DexFile& dex_file = info.target_dex_file;
+ uint32_t method_index = info.offset_or_index;
+ DCHECK(info.pc_rel_label.IsBound());
+ uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(pc_rel_offset, &dex_file, method_index));
+ }
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file, uint32_t element_offset) {
+ return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeCallPatch(
+ const DexFile& dex_file, uint32_t method_index) {
+ return NewPcRelativePatch(dex_file, method_index, &relative_call_patches_);
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
+ const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
+ patches->emplace_back(dex_file, offset_or_index);
+ return &patches->back();
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateUint64Literal(uint64_t value) {
+ return uint64_literals_.GetOrCreate(
+ value,
+ [this, value]() { return __ NewLiteral<uint64_t>(value); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodLiteral(MethodReference target_method,
+ MethodToLiteralMap* map) {
+ return map->GetOrCreate(
+ target_method,
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodAddressLiteral(MethodReference target_method) {
+ return DeduplicateMethodLiteral(target_method, &method_patches_);
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodCodeLiteral(MethodReference target_method) {
+ return DeduplicateMethodLiteral(target_method, &call_patches_);
+}
+
+void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
+ GpuRegister out) {
+ __ Bind(&info->pc_rel_label);
+ // Add the high half of a 32-bit offset to PC.
+ __ Auipc(out, /* placeholder */ 0x1234);
+ // The immediately following instruction will add the sign-extended low half of the 32-bit
+ // offset to `out` (e.g. ld, jialc, addiu).
+}
+
void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
@@ -946,7 +1053,6 @@
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
@@ -2636,14 +2742,17 @@
/* false_target */ nullptr);
}
-void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<GpuRegister>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
}
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
@@ -2986,39 +3095,33 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- switch (desired_dispatch_info.method_load_kind) {
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
- default:
- break;
- }
- switch (desired_dispatch_info.code_ptr_location) {
- case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- desired_dispatch_info.method_load_kind,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- desired_dispatch_info.method_load_data,
- 0u
- };
- default:
- return desired_dispatch_info;
- }
+ // On MIPS64 we support all dispatch types.
+ return desired_dispatch_info;
}
void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
-
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
+ HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
+
+  // For better instruction scheduling, we load the direct code pointer before the method pointer.
+ switch (code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // T9 = invoke->GetDirectCodePtr();
+ __ LoadLiteral(T9, kLoadDoubleword, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // T9 = code address from literal pool with link-time patch.
+ __ LoadLiteral(T9,
+ kLoadUnsignedWord,
+ DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
+ break;
+ default:
+ break;
+ }
+
+ switch (method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
uint32_t offset =
@@ -3033,14 +3136,23 @@
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
+ __ LoadLiteral(temp.AsRegister<GpuRegister>(),
+ kLoadDoubleword,
+ DeduplicateUint64Literal(invoke->GetMethodAddress()));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement these types.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ __ LoadLiteral(temp.AsRegister<GpuRegister>(),
+ kLoadUnsignedWord,
+ DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ uint32_t offset = invoke->GetDexCacheArrayOffset();
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset);
+ EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
GpuRegister reg = temp.AsRegister<GpuRegister>();
@@ -3071,23 +3183,25 @@
}
}
- switch (invoke->GetCodePtrLocation()) {
+ switch (code_ptr_location) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
- __ Jialc(&frame_entry_label_, T9);
+ __ Balc(&frame_entry_label_);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // LR = invoke->GetDirectCodePtr();
- __ LoadConst64(T9, invoke->GetDirectCodePtr());
- // LR()
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // T9 prepared above for better instruction scheduling.
+ // T9()
__ Jalr(T9);
__ Nop();
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement these types.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ NewPcRelativeCallPatch(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
+ EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Jialc(AT, /* placeholder */ 0x5678);
+ break;
+ }
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
__ LoadFromOffset(kLoadDoubleword,
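
For reference, the instruction pairs that EmitPcRelativeAddressPlaceholderHigh() sets up on
MIPS64 (the 0x1234/0x5678 immediates are the placeholders that Mips64RelativePatcher
rewrites at link time):

    // kDexCachePcRelative method load:
    //   auipc AT, 0x1234        // AT = PC + (high(diff) << 16)
    //   ld    temp, 0x5678(AT)  // temp = *(AT + sign-extended low(diff))
    // kCallPCRelative call:
    //   auipc AT, 0x1234
    //   jialc AT, 0x5678        // call AT + sign-extended low(diff)
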
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 690eccb..067c1f9 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -279,6 +279,9 @@
Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ // Emit linker patches.
+ void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+
void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
// Register allocation.
@@ -357,7 +360,44 @@
void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays,
+ // boot image strings and method calls. The only difference is the interpretation of
+ // the offset_or_index.
+ struct PcRelativePatchInfo {
+ PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
+ : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
+ PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;
+
+ const DexFile& target_dex_file;
+ // Either the dex cache array element offset or the string/type/method index.
+ uint32_t offset_or_index;
+ // Label for the auipc instruction.
+ Mips64Label pc_rel_label;
+ };
+
+ PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
+ uint32_t element_offset);
+ PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
+ uint32_t method_index);
+
+ void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
+
private:
+ using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
+ using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ Literal* DeduplicateUint64Literal(uint64_t value);
+ Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
+ Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
+ Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
+
+ PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
+ uint32_t offset_or_index,
+ ArenaDeque<PcRelativePatchInfo>* patches);
+
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
Mips64Label* block_labels_; // Indexed by block id.
Mips64Label frame_entry_label_;
@@ -367,6 +407,16 @@
Mips64Assembler assembler_;
const Mips64InstructionSetFeatures& isa_features_;
+ // Deduplication map for 64-bit literals, used for non-patchable method address or method code
+ // address.
+ Uint64ToLiteralMap uint64_literals_;
+ // Method patch info, map MethodReference to a literal for method address and method code.
+ MethodToLiteralMap method_patches_;
+ MethodToLiteralMap call_patches_;
+ // PC-relative patch info.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
+ ArenaDeque<PcRelativePatchInfo> relative_call_patches_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};
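
Taken together, the additions to this header support a three-stage flow (a sketch using
only names from this diff):

    // 1. Code generation: create a patch record and emit the placeholder pair;
    //    EmitPcRelativeAddressPlaceholderHigh() binds info->pc_rel_label at the auipc.
    PcRelativePatchInfo* info = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
    EmitPcRelativeAddressPlaceholderHigh(info, AT);
    // 2. EmitLinkerPatches(): each bound label becomes a LinkerPatch for the oat writer.
    // 3. Link time: Mips64RelativePatcher::PatchPcRelativeReference() fills in the
    //    high/low immediates.
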
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 433dced..95551c8 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -47,6 +47,9 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() {
return codegen_->GetAssembler();
}
@@ -467,9 +470,9 @@
__ Cmp(op1, op2);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ite(is_min ? lt : gt);
__ mov(is_min ? lt : gt, out, op1);
@@ -1050,9 +1053,9 @@
__ Subs(tmp, tmp, expected);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ itt(eq);
__ strex(eq, tmp, value, MemOperand(tmp_ptr));
@@ -1066,9 +1069,9 @@
__ Rsbs(out, tmp, 1);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cc);
__ mov(cc, out, 0);
@@ -1185,9 +1188,9 @@
// temp0 = min(len(str), len(arg)).
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(gt);
__ mov(gt, temp0, temp1);
@@ -1207,9 +1210,9 @@
// This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
__ Lsls(temp3, temp3, 31u); // Extract purely the compression flag.
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(ne);
__ add(ne, temp0, temp0, temp0);
@@ -1324,9 +1327,9 @@
__ Mov(temp2, arg);
__ Lsrs(temp3, temp3, 1u); // Continue the move of the compression flag.
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ itt(cs); // Interleave with selection of temp1 and temp2.
__ mov(cs, temp1, arg); // Preserves flags.
__ mov(cs, temp2, str); // Preserves flags.
@@ -1361,9 +1364,9 @@
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cc);
__ rsb(cc, out, out, 0);
}
@@ -1457,9 +1460,9 @@
// For string compression, calculate the number of bytes to compare (not chars).
// This could in theory exceed INT32_MAX, so treat temp as unsigned.
__ Lsrs(temp, temp, 1u); // Extract length and check compression flag.
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cs); // If uncompressed,
__ add(cs, temp, temp, temp); // double the byte count.
}
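
The scope renamed throughout these hunks pins down how many bytes the hand-written IT-block sequences may emit. A minimal sketch of that contract under the kMaximumSize policy, using hypothetical CodeBuffer/ScopeSketch names rather than VIXL's real classes:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the assembler's code buffer.
    class CodeBuffer {
     public:
      size_t Size() const { return bytes_.size(); }
      void Emit32(uint32_t insn) {
        for (int i = 0; i < 4; ++i) {
          bytes_.push_back(static_cast<uint8_t>(insn >> (8 * i)));
        }
      }
     private:
      std::vector<uint8_t> bytes_;
    };

    // Under the kMaximumSize policy, code emitted while the scope is alive
    // must not exceed the declared reservation; on ARM this is what keeps a
    // hand-written IT block from being silently rewritten or split.
    class ScopeSketch {
     public:
      ScopeSketch(CodeBuffer* buffer, size_t max_size)
          : buffer_(buffer), start_(buffer->Size()), max_size_(max_size) {}
      ~ScopeSketch() { assert(buffer_->Size() - start_ <= max_size_); }
     private:
      CodeBuffer* buffer_;
      size_t start_;
      size_t max_size_;
    };

    int main() {
      CodeBuffer buffer;
      {
        ScopeSketch scope(&buffer, 2 * 4);  // Room for two 4-byte instructions.
        buffer.Emit32(0xBF08u);             // it eq (illustrative encoding).
        buffer.Emit32(0x0000u);             // mov eq, ... (illustrative).
      }                                     // Destructor checks the size.
      return 0;
    }

The RAII destructor is what turns an over-long expansion into an immediate failure instead of a silently misassembled IT block.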
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 925d4f1..1e946d6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2440,6 +2440,17 @@
}
}
+// Helper for InstructionDataEquals to fetch the mirror Class from
+// a LoadClass with the kJitTableAddress load kind.
+// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
+// mirrors, they are stored in a variable-sized handle scope which is
+// always visited during a pause. Also, the only caller of this helper
+// uses the mirror solely for pointer comparison.
+static inline mirror::Class* AsMirrorInternal(uint64_t address)
+ NO_THREAD_SAFETY_ANALYSIS {
+ return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
+}
+
bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
const HLoadClass* other_load_class = other->AsLoadClass();
// TODO: To allow GVN for HLoadClass from different dex files, we should compare the type
@@ -2448,16 +2459,14 @@
GetPackedFields() != other_load_class->GetPackedFields()) {
return false;
}
- LoadKind load_kind = GetLoadKind();
- if (HasAddress(load_kind)) {
- return GetAddress() == other_load_class->GetAddress();
- } else if (HasTypeReference(load_kind)) {
- return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
- } else {
- DCHECK(HasDexCacheReference(load_kind)) << load_kind;
- // If the type indexes and dex files are the same, dex cache element offsets
- // must also be the same, so we don't need to compare them.
- return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
+ switch (GetLoadKind()) {
+ case LoadKind::kBootImageAddress:
+ return GetAddress() == other_load_class->GetAddress();
+ case LoadKind::kJitTableAddress:
+ return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+ default:
+ DCHECK(HasTypeReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()));
+ return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
}
}
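
The kJitTableAddress case above compares classes through AsMirrorInternal rather than comparing raw addresses, because two different handle slots can refer to the same class. A rough, self-contained rendering of that decode-and-compare, with a hypothetical CompressedReference standing in for ART's 32-bit StackReference:

    #include <cstdint>

    // Opaque placeholder for mirror::Class.
    struct MirrorClass;

    // Hypothetical 32-bit compressed reference, standing in for the
    // StackReference slot that a handle-scope entry occupies.
    struct CompressedReference {
      uint32_t ref_;
      MirrorClass* AsPtr() const {
        return reinterpret_cast<MirrorClass*>(static_cast<uintptr_t>(ref_));
      }
    };

    // Two kJitTableAddress loads are GVN-equal iff their handle slots decode
    // to the same mirror pointer, matching AsMirrorInternal above.
    bool SameClass(uint64_t addr_a, uint64_t addr_b) {
      return reinterpret_cast<const CompressedReference*>(addr_a)->AsPtr() ==
             reinterpret_cast<const CompressedReference*>(addr_b)->AsPtr();
    }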
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index bbbb1a1..91efb80 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -197,7 +197,9 @@
// out which class loader to use.
address = reinterpret_cast<uint64_t>(handles_->NewHandle(klass).GetReference());
} else {
- // Class not loaded yet. Fallback to the dex cache.
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fall back to the dex cache.
// TODO(ngeoffray): Generate HDeoptimize instead.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
}
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 1614d04..76a94e8 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -23,6 +23,9 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
namespace art {
namespace arm {
@@ -459,9 +462,9 @@
if (!label->IsBound()) {
// Try to use 16-bit T2 encoding of B instruction.
DCHECK(OutsideITBlock());
- AssemblerAccurateScope ass(this,
- kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope ass(this,
+ kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
b(al, Narrow, label);
AddBranchLabel(label);
return;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 2d026b8..4e64f13 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -24,6 +24,9 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
namespace vixl32 = vixl::aarch32;
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
namespace art {
namespace arm {
@@ -455,16 +458,16 @@
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
if (!out_reg.Equals(in_reg)) {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(eq, 0xc);
___ mov(eq, out_reg.AsVIXLRegister(), 0);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
} else {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
}
@@ -493,9 +496,9 @@
___ Cmp(scratch.AsVIXLRegister(), 0);
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
} else {
@@ -586,9 +589,9 @@
___ Cmp(scratch.AsVIXLRegister(), 0);
{
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ b(ne, Narrow, exception_blocks_.back()->Entry());
}
// TODO: think about using CBNZ here.
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 1a21df9..84280b9 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -35,6 +35,7 @@
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
}
+ EmitLiterals();
PromoteBranches();
}
@@ -450,6 +451,21 @@
EmitI(0x27, rs, rt, imm16);
}
+void Mips64Assembler::Lwpc(GpuRegister rs, uint32_t imm19) {
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, (0x01 << 19) | imm19);
+}
+
+void Mips64Assembler::Lwupc(GpuRegister rs, uint32_t imm19) {
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, (0x02 << 19) | imm19);
+}
+
+void Mips64Assembler::Ldpc(GpuRegister rs, uint32_t imm18) {
+ CHECK(IsUint<18>(imm18)) << imm18;
+ EmitI21(0x3B, rs, (0x06 << 18) | imm18);
+}
+
void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}
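
The three new loads share the MIPS R6 PCREL major opcode (0x3B) and differ only in a sub-opcode packed into the top bits of the 21-bit immediate. A standalone sketch of the bit layout assumed by the emitters above, i.e. opcode(6) | rs(5) | imm(21):

    #include <cstdint>

    // Assumed R6 layout: opcode(6) | rs(5) | immediate(21).
    uint32_t EncodeI21(uint32_t opcode, uint32_t rs, uint32_t imm21) {
      return (opcode << 26) | (rs << 21) | (imm21 & 0x1FFFFFu);
    }

    // PCREL major opcode 0x3B; the sub-opcode lives in the top bits of the
    // immediate field, exactly as in the emitters above.
    uint32_t LwpcBits(uint32_t rs, uint32_t imm19) {
      return EncodeI21(0x3B, rs, (0x01u << 19) | imm19);
    }
    uint32_t LwupcBits(uint32_t rs, uint32_t imm19) {
      return EncodeI21(0x3B, rs, (0x02u << 19) | imm19);
    }
    uint32_t LdpcBits(uint32_t rs, uint32_t imm18) {
      return EncodeI21(0x3B, rs, (0x06u << 18) | imm18);
    }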
@@ -548,6 +564,10 @@
EmitI26(0x32, imm26);
}
+void Mips64Assembler::Balc(uint32_t imm26) {
+ EmitI26(0x3A, imm26);
+}
+
void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}
@@ -1064,19 +1084,37 @@
type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
}
-void Mips64Assembler::Branch::InitializeType(bool is_call) {
+void Mips64Assembler::Branch::InitializeType(Type initial_type) {
OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
- if (is_call) {
- InitShortOrLong(offset_size, kCall, kLongCall);
- } else if (condition_ == kUncond) {
- InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
- } else {
- if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
- // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
- type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
- } else {
- InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
- }
+ switch (initial_type) {
+ case kLabel:
+ case kLiteral:
+ case kLiteralUnsigned:
+ case kLiteralLong:
+ CHECK(!IsResolved());
+ type_ = initial_type;
+ break;
+ case kCall:
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ break;
+ case kCondBranch:
+ switch (condition_) {
+ case kUncond:
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ break;
+ case kCondEQZ:
+ case kCondNEZ:
+ // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
+ break;
+ default:
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch type " << initial_type;
+ UNREACHABLE();
}
old_type_ = type_;
}
@@ -1109,14 +1147,14 @@
}
}
-Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target)
+Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, bool is_call)
: old_location_(location),
location_(location),
target_(target),
lhs_reg_(ZERO),
rhs_reg_(ZERO),
condition_(kUncond) {
- InitializeType(false);
+ InitializeType(is_call ? kCall : kCondBranch);
}
Mips64Assembler::Branch::Branch(uint32_t location,
@@ -1164,19 +1202,18 @@
// Branch condition is always true, make the branch unconditional.
condition_ = kUncond;
}
- InitializeType(false);
+ InitializeType(kCondBranch);
}
-Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg)
+Mips64Assembler::Branch::Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type)
: old_location_(location),
location_(location),
- target_(target),
- lhs_reg_(indirect_reg),
+ target_(kUnresolved),
+ lhs_reg_(dest_reg),
rhs_reg_(ZERO),
condition_(kUncond) {
- CHECK_NE(indirect_reg, ZERO);
- CHECK_NE(indirect_reg, AT);
- InitializeType(true);
+ CHECK_NE(dest_reg, ZERO);
+ InitializeType(label_or_literal_type);
}
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -1278,11 +1315,23 @@
case kUncondBranch:
case kCondBranch:
case kCall:
+ // Near label.
+ case kLabel:
+ // Near literals.
+ case kLiteral:
+ case kLiteralUnsigned:
+ case kLiteralLong:
return false;
// Long branches.
case kLongUncondBranch:
case kLongCondBranch:
case kLongCall:
+ // Far label.
+ case kFarLabel:
+ // Far literals.
+ case kFarLiteral:
+ case kFarLiteralUnsigned:
+ case kFarLiteralLong:
return true;
}
UNREACHABLE();
@@ -1351,6 +1400,20 @@
case kCall:
type_ = kLongCall;
break;
+ // Near label.
+ case kLabel:
+ type_ = kFarLabel;
+ break;
+ // Near literals.
+ case kLiteral:
+ type_ = kFarLiteral;
+ break;
+ case kLiteralUnsigned:
+ type_ = kFarLiteralUnsigned;
+ break;
+ case kLiteralLong:
+ type_ = kFarLiteralLong;
+ break;
default:
// Note: 'type_' is already long.
break;
@@ -1397,7 +1460,15 @@
uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
// Calculate the byte distance between instructions and also account for
// different PC-relative origins.
- uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
+ uint32_t offset_location = GetOffsetLocation();
+ if (type_ == kLiteralLong) {
+ // Special case for the ldpc instruction, whose address (PC) is rounded down to
+ // a multiple of 8 before adding the offset.
+ // Note that branch promotion has already taken care of aligning `target_`
+ // to an address that's a multiple of 8.
+ offset_location = RoundDown(offset_location, sizeof(uint64_t));
+ }
+ uint32_t offset = target_ - offset_location - branch_info_[type_].pc_org * sizeof(uint32_t);
// Prepare the offset for encoding into the instruction(s).
offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
return offset;
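
A small worked example of the rounding above, assuming (as the comment notes) that the literal target is already 8-byte aligned:

    #include <cassert>
    #include <cstdint>

    // ldpc computes its base as RoundDown(PC, 8), so the encoded immediate
    // counts 8-byte units from that rounded-down origin.
    uint32_t LdpcImm18(uint32_t load_location, uint32_t target) {
      uint32_t origin = load_location & ~UINT32_C(7);
      return (target - origin) >> 3;
    }

    int main() {
      // Load at byte 0x14, 8-byte-aligned literal at 0x40:
      // origin = 0x10, so the immediate is (0x40 - 0x10) / 8 = 6.
      assert(LdpcImm18(0x14, 0x40) == 6u);
      return 0;
    }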
@@ -1444,7 +1515,7 @@
label->BindTo(bound_pc);
}
-uint32_t Mips64Assembler::GetLabelLocation(Mips64Label* label) const {
+uint32_t Mips64Assembler::GetLabelLocation(const Mips64Label* label) const {
CHECK(label->IsBound());
uint32_t target = label->Position();
if (label->prev_branch_id_plus_one_) {
@@ -1500,7 +1571,7 @@
void Mips64Assembler::Buncond(Mips64Label* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call */ false);
FinalizeLabeledBranch(label);
}
@@ -1517,12 +1588,87 @@
FinalizeLabeledBranch(label);
}
-void Mips64Assembler::Call(Mips64Label* label, GpuRegister indirect_reg) {
+void Mips64Assembler::Call(Mips64Label* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, indirect_reg);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call */ true);
FinalizeLabeledBranch(label);
}
+void Mips64Assembler::LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label) {
+ // Label address loads are treated as pseudo branches since they require very similar handling.
+ DCHECK(!label->IsBound());
+ branches_.emplace_back(buffer_.Size(), dest_reg, Branch::kLabel);
+ FinalizeLabeledBranch(label);
+}
+
+Literal* Mips64Assembler::NewLiteral(size_t size, const uint8_t* data) {
+ // We don't support byte and half-word literals.
+ if (size == 4u) {
+ literals_.emplace_back(size, data);
+ return &literals_.back();
+ } else {
+ DCHECK_EQ(size, 8u);
+ long_literals_.emplace_back(size, data);
+ return &long_literals_.back();
+ }
+}
+
+void Mips64Assembler::LoadLiteral(GpuRegister dest_reg,
+ LoadOperandType load_type,
+ Literal* literal) {
+ // Literal loads are treated as pseudo branches since they require very similar handling.
+ Branch::Type literal_type;
+ switch (load_type) {
+ case kLoadWord:
+ DCHECK_EQ(literal->GetSize(), 4u);
+ literal_type = Branch::kLiteral;
+ break;
+ case kLoadUnsignedWord:
+ DCHECK_EQ(literal->GetSize(), 4u);
+ literal_type = Branch::kLiteralUnsigned;
+ break;
+ case kLoadDoubleword:
+ DCHECK_EQ(literal->GetSize(), 8u);
+ literal_type = Branch::kLiteralLong;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected literal load type " << load_type;
+ UNREACHABLE();
+ }
+ Mips64Label* label = literal->GetLabel();
+ DCHECK(!label->IsBound());
+ branches_.emplace_back(buffer_.Size(), dest_reg, literal_type);
+ FinalizeLabeledBranch(label);
+}
+
+void Mips64Assembler::EmitLiterals() {
+ if (!literals_.empty()) {
+ for (Literal& literal : literals_) {
+ Mips64Label* label = literal.GetLabel();
+ Bind(label);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ DCHECK_EQ(literal.GetSize(), 4u);
+ for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
+ buffer_.Emit<uint8_t>(literal.GetData()[i]);
+ }
+ }
+ }
+ if (!long_literals_.empty()) {
+ // Reserve 4 bytes for potential alignment. If after the branch promotion the 64-bit
+ // literals don't end up 8-byte-aligned, they will be moved down 4 bytes.
+ Emit(0); // NOP.
+ for (Literal& literal : long_literals_) {
+ Mips64Label* label = literal.GetLabel();
+ Bind(label);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ DCHECK_EQ(literal.GetSize(), 8u);
+ for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
+ buffer_.Emit<uint8_t>(literal.GetData()[i]);
+ }
+ }
+ }
+}
+
void Mips64Assembler::PromoteBranches() {
// Promote short branches to long as necessary.
bool changed;
@@ -1561,6 +1707,35 @@
end = branch.GetOldLocation();
}
}
+
+ // Align 64-bit literals by moving them down by 4 bytes if needed.
+ // This will reduce the PC-relative distance, which should be safe for both near and far literals.
+ if (!long_literals_.empty()) {
+ uint32_t first_literal_location = GetLabelLocation(long_literals_.front().GetLabel());
+ size_t lit_size = long_literals_.size() * sizeof(uint64_t);
+ size_t buf_size = buffer_.Size();
+ // 64-bit literals must be at the very end of the buffer.
+ CHECK_EQ(first_literal_location + lit_size, buf_size);
+ if (!IsAligned<sizeof(uint64_t)>(first_literal_location)) {
+ buffer_.Move(first_literal_location - sizeof(uint32_t), first_literal_location, lit_size);
+ // The 4 reserved bytes proved useless; reduce the buffer size.
+ buffer_.Resize(buf_size - sizeof(uint32_t));
+ // Reduce the target addresses of literal and address loads by 4 bytes so that
+ // correct PC-relative offsets are generated.
+ for (auto& branch : branches_) {
+ uint32_t target = branch.GetTarget();
+ if (target >= first_literal_location) {
+ branch.Resolve(target - sizeof(uint32_t));
+ }
+ }
+ // If after this we ever call GetLabelLocation() to get the location of a 64-bit literal,
+ // we need to adjust the location of the literal's label as well.
+ for (Literal& literal : long_literals_) {
+ // A bound label's position is negative, hence incrementing it instead of decrementing.
+ literal.GetLabel()->position_ += sizeof(uint32_t);
+ }
+ }
+ }
}
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
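
The buffer move in the hunk above is easier to see in isolation. A sketch under the assumption that instruction locations are always 4-byte aligned, so one reserved word of slack suffices:

    #include <cassert>
    #include <cstdint>

    // Given 4-byte-aligned instruction locations, one word of slack (the
    // reserved NOP) is enough to make the literal base 8-byte aligned by
    // sliding the whole literal area down one word.
    uint32_t AlignLiteralBase(uint32_t first_literal_location) {
      assert((first_literal_location & 3u) == 0u);
      if ((first_literal_location & 7u) != 0u) {
        first_literal_location -= 4u;  // Consume the reserved word.
      }
      return first_literal_location;
    }

    int main() {
      assert(AlignLiteralBase(0x1000u) == 0x1000u);  // Already aligned: keep.
      assert(AlignLiteralBase(0x1004u) == 0x1000u);  // Misaligned: move down 4.
      return 0;
    }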
@@ -1569,11 +1744,23 @@
{ 1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 }, // kUncondBranch
{ 2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 }, // kCondBranch
// Exception: kOffset23 for beqzc/bnezc
- { 2, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kCall
+ { 1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 }, // kCall
+ // Near label.
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLabel
+ // Near literals.
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLiteral
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLiteralUnsigned
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 3 }, // kLiteralLong
// Long branches.
{ 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongUncondBranch
{ 3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCondBranch
- { 3, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCall
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCall
+ // Far label.
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLabel
+ // Far literals.
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteral
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteralUnsigned
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteralLong
};
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
@@ -1597,8 +1784,26 @@
break;
case Branch::kCall:
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Balc(offset);
+ break;
+
+ // Near label.
+ case Branch::kLabel:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Addiupc(lhs, offset);
- Jialc(lhs, 0);
+ break;
+ // Near literals.
+ case Branch::kLiteral:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lwpc(lhs, offset);
+ break;
+ case Branch::kLiteralUnsigned:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lwupc(lhs, offset);
+ break;
+ case Branch::kLiteralLong:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Ldpc(lhs, offset);
break;
// Long branches.
@@ -1616,11 +1821,37 @@
Jic(AT, Low16Bits(offset));
break;
case Branch::kLongCall:
- offset += (offset & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jialc.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
- Auipc(lhs, High16Bits(offset));
- Daddiu(lhs, lhs, Low16Bits(offset));
- Jialc(lhs, 0);
+ Auipc(AT, High16Bits(offset));
+ Jialc(AT, Low16Bits(offset));
+ break;
+
+ // Far label.
+ case Branch::kFarLabel:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in addiu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Addiu(lhs, AT, Low16Bits(offset));
+ break;
+ // Far literals.
+ case Branch::kFarLiteral:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in lw.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Lw(lhs, AT, Low16Bits(offset));
+ break;
+ case Branch::kFarLiteralUnsigned:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in lwu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Lwu(lhs, AT, Low16Bits(offset));
+ break;
+ case Branch::kFarLiteralLong:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in ld.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Ld(lhs, AT, Low16Bits(offset));
break;
}
CHECK_EQ(overwrite_location_, branch->GetEndLocation());
@@ -1631,8 +1862,8 @@
Buncond(label);
}
-void Mips64Assembler::Jialc(Mips64Label* label, GpuRegister indirect_reg) {
- Call(label, indirect_reg);
+void Mips64Assembler::Balc(Mips64Label* label) {
+ Call(label);
}
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
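
Several of the far cases in EmitBranch() pre-bias the offset with `offset += (offset & 0x8000) << 1`. The second instruction (jialc, addiu, lw, lwu or ld) sign-extends its 16-bit immediate, so whenever bit 15 of the low half is set, the high half materialized by auipc must be one larger to compensate. A self-contained check of that identity:

    #include <cassert>
    #include <cstdint>

    // What the hardware does: auipc materializes PC + (hi << 16), and the
    // following instruction adds its sign-extended 16-bit immediate.
    uint32_t CombineAuipcLo16(uint16_t hi, uint16_t lo) {
      return (static_cast<uint32_t>(hi) << 16) +
             static_cast<uint32_t>(static_cast<int32_t>(static_cast<int16_t>(lo)));
    }

    int main() {
      // Bit 15 of the low half is set, so sign extension subtracts 0x10000;
      // the `(offset & 0x8000) << 1` bias bumps the high half to compensate.
      uint32_t offset = 0x00018000u;
      uint32_t biased = offset + ((offset & 0x8000u) << 1);
      assert(CombineAuipcLo16(static_cast<uint16_t>(biased >> 16),
                              static_cast<uint16_t>(biased & 0xFFFFu)) == offset);
      return 0;
    }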
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 238cb9d..08a55ed 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -17,9 +17,11 @@
#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
+#include <deque>
#include <utility>
#include <vector>
+#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips64.h"
@@ -312,6 +314,49 @@
DISALLOW_COPY_AND_ASSIGN(Mips64Label);
};
+// An assembler literal is a value embedded in the code, retrieved using a PC-relative load.
+class Literal {
+ public:
+ static constexpr size_t kMaxSize = 8;
+
+ Literal(uint32_t size, const uint8_t* data)
+ : label_(), size_(size) {
+ DCHECK_LE(size, Literal::kMaxSize);
+ memcpy(data_, data, size);
+ }
+
+ template <typename T>
+ T GetValue() const {
+ DCHECK_EQ(size_, sizeof(T));
+ T value;
+ memcpy(&value, data_, sizeof(T));
+ return value;
+ }
+
+ uint32_t GetSize() const {
+ return size_;
+ }
+
+ const uint8_t* GetData() const {
+ return data_;
+ }
+
+ Mips64Label* GetLabel() {
+ return &label_;
+ }
+
+ const Mips64Label* GetLabel() const {
+ return &label_;
+ }
+
+ private:
+ Mips64Label label_;
+ const uint32_t size_;
+ uint8_t data_[kMaxSize];
+
+ DISALLOW_COPY_AND_ASSIGN(Literal);
+};
+
// Slowpath entered when Thread::Current()->_exception is non-null.
class Mips64ExceptionSlowPath {
public:
@@ -341,6 +386,8 @@
: Assembler(arena),
overwriting_(false),
overwrite_location_(0),
+ literals_(arena->Adapter(kArenaAllocAssembler)),
+ long_literals_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0) {
@@ -386,18 +433,18 @@
void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Bitswap(GpuRegister rd, GpuRegister rt);
- void Dbitswap(GpuRegister rd, GpuRegister rt);
+ void Dbitswap(GpuRegister rd, GpuRegister rt); // MIPS64
void Seb(GpuRegister rd, GpuRegister rt);
void Seh(GpuRegister rd, GpuRegister rt);
- void Dsbh(GpuRegister rd, GpuRegister rt);
- void Dshd(GpuRegister rd, GpuRegister rt);
+ void Dsbh(GpuRegister rd, GpuRegister rt); // MIPS64
+ void Dshd(GpuRegister rd, GpuRegister rt); // MIPS64
void Dext(GpuRegister rs, GpuRegister rt, int pos, int size); // MIPS64
void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size); // MIPS64
void Wsbh(GpuRegister rd, GpuRegister rt);
void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
- void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
void Ll(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
- void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
void Sll(GpuRegister rd, GpuRegister rt, int shamt);
void Srl(GpuRegister rd, GpuRegister rt, int shamt);
@@ -409,7 +456,7 @@
void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
- void Drotr(GpuRegister rd, GpuRegister rt, int shamt);
+ void Drotr(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
@@ -427,6 +474,9 @@
void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
+ void Lwpc(GpuRegister rs, uint32_t imm19);
+ void Lwupc(GpuRegister rs, uint32_t imm19); // MIPS64
+ void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
@@ -445,8 +495,8 @@
void Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Clz(GpuRegister rd, GpuRegister rs);
void Clo(GpuRegister rd, GpuRegister rs);
- void Dclz(GpuRegister rd, GpuRegister rs);
- void Dclo(GpuRegister rd, GpuRegister rs);
+ void Dclz(GpuRegister rd, GpuRegister rs); // MIPS64
+ void Dclo(GpuRegister rd, GpuRegister rs); // MIPS64
void Jalr(GpuRegister rd, GpuRegister rs);
void Jalr(GpuRegister rs);
@@ -454,6 +504,7 @@
void Auipc(GpuRegister rs, uint16_t imm16);
void Addiupc(GpuRegister rs, uint32_t imm19);
void Bc(uint32_t imm26);
+ void Balc(uint32_t imm26);
void Jic(GpuRegister rt, uint16_t imm16);
void Jialc(GpuRegister rt, uint16_t imm16);
void Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
@@ -605,8 +656,26 @@
UNREACHABLE();
}
+ // Create a new literal with a given value.
+ // NOTE: Force the template parameter to be explicitly specified.
+ template <typename T>
+ Literal* NewLiteral(typename Identity<T>::type value) {
+ static_assert(std::is_integral<T>::value, "T must be an integral type.");
+ return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
+ }
+
+ // Load label address using PC-relative loads. To be used with data labels in the literal /
+ // jump table area only and not with regular code labels.
+ void LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label);
+
+ // Create a new literal with the given data.
+ Literal* NewLiteral(size_t size, const uint8_t* data);
+
+ // Load literal using PC-relative loads.
+ void LoadLiteral(GpuRegister dest_reg, LoadOperandType load_type, Literal* literal);
+
void Bc(Mips64Label* label);
- void Jialc(Mips64Label* label, GpuRegister indirect_reg);
+ void Balc(Mips64Label* label);
void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
void Bltzc(GpuRegister rt, Mips64Label* label);
void Bgtzc(GpuRegister rt, Mips64Label* label);
@@ -756,12 +825,15 @@
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
// must be used instead of Mips64Label::GetPosition()).
- uint32_t GetLabelLocation(Mips64Label* label) const;
+ uint32_t GetLabelLocation(const Mips64Label* label) const;
// Get the final position of a label after local fixup based on the old position
// recorded before FinalizeCode().
uint32_t GetAdjustedPosition(uint32_t old_position);
+ // Note that PC-relative literal loads are handled as pseudo branches because they need very
+ // similar relocation and may similarly expand in size to accommodate larger offsets relative
+ // to PC.
enum BranchCondition {
kCondLT,
kCondGE,
@@ -791,10 +863,22 @@
kUncondBranch,
kCondBranch,
kCall,
+ // Near label.
+ kLabel,
+ // Near literals.
+ kLiteral,
+ kLiteralUnsigned,
+ kLiteralLong,
// Long branches.
kLongUncondBranch,
kLongCondBranch,
kLongCall,
+ // Far label.
+ kFarLabel,
+ // Far literals.
+ kFarLiteral,
+ kFarLiteralUnsigned,
+ kFarLiteralLong,
};
// Bit sizes of offsets defined as enums to minimize chance of typos.
@@ -830,16 +914,16 @@
};
static const BranchInfo branch_info_[/* Type */];
- // Unconditional branch.
- Branch(uint32_t location, uint32_t target);
+ // Unconditional branch or call.
+ Branch(uint32_t location, uint32_t target, bool is_call);
// Conditional branch.
Branch(uint32_t location,
uint32_t target,
BranchCondition condition,
GpuRegister lhs_reg,
- GpuRegister rhs_reg = ZERO);
- // Call (branch and link) that stores the target address in a given register (i.e. T9).
- Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg);
+ GpuRegister rhs_reg);
+ // Label address (in literal area) or literal.
+ Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type);
// Some conditional branches with lhs = rhs are effectively NOPs, while some
// others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
@@ -923,7 +1007,7 @@
private:
// Completes branch construction by determining and recording its type.
- void InitializeType(bool is_call);
+ void InitializeType(Type initial_type);
// Helper for the above.
void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
@@ -932,7 +1016,7 @@
uint32_t target_; // Offset into assembler buffer in bytes.
GpuRegister lhs_reg_; // Left-hand side register in conditional branches or
- // indirect call register.
+ // destination register in literal and label address loads.
GpuRegister rhs_reg_; // Right-hand side register in conditional branches.
BranchCondition condition_; // Condition for conditional branches.
@@ -957,12 +1041,13 @@
BranchCondition condition,
GpuRegister lhs,
GpuRegister rhs = ZERO);
- void Call(Mips64Label* label, GpuRegister indirect_reg);
+ void Call(Mips64Label* label);
void FinalizeLabeledBranch(Mips64Label* label);
Branch* GetBranch(uint32_t branch_id);
const Branch* GetBranch(uint32_t branch_id) const;
+ void EmitLiterals();
void PromoteBranches();
void EmitBranch(Branch* branch);
void EmitBranches();
@@ -981,6 +1066,11 @@
// The current overwrite location.
uint32_t overwrite_location_;
+ // Use std::deque<> for literal labels to allow insertions at the end
+ // without invalidating pointers and references to existing elements.
+ ArenaDeque<Literal> literals_;
+ ArenaDeque<Literal> long_literals_; // 64-bit literals separated for alignment reasons.
+
// Data for AdjustedPosition(), see the description there.
uint32_t last_position_adjustment_;
uint32_t last_old_position_;
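
The deque comment above is the load-bearing design choice here: Literal objects are handed out by pointer from NewLiteral(), so the container must never relocate them. A minimal illustration with plain std::deque (ArenaDeque being the arena-allocated equivalent):

    #include <cassert>
    #include <cstdint>
    #include <deque>

    int main() {
      std::deque<uint64_t> literals;
      literals.push_back(1u);
      const uint64_t* first = &literals.front();
      for (uint64_t i = 2; i <= 1000; ++i) {
        literals.push_back(i);  // push_back never relocates existing elements.
      }
      // Unlike std::vector, references handed out earlier remain valid.
      assert(first == &literals.front());
      return 0;
    }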
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index ba8f25e..f62822d 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -576,83 +576,83 @@
RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
}
-TEST_F(AssemblerMIPS64Test, Jialc) {
+TEST_F(AssemblerMIPS64Test, Balc) {
mips64::Mips64Label label1, label2;
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
constexpr size_t kAdduCount1 = 63;
for (size_t i = 0; i != kAdduCount1; ++i) {
__ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
}
__ Bind(&label1);
- __ Jialc(&label2, mips64::T9);
+ __ Balc(&label2);
constexpr size_t kAdduCount2 = 64;
for (size_t i = 0; i != kAdduCount2; ++i) {
__ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
}
__ Bind(&label2);
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
std::string expected =
".set noreorder\n"
- "lapc $t9, 1f\n"
- "jialc $t9, 0\n" +
+ "balc 1f\n" +
RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
"1:\n"
- "lapc $t9, 2f\n"
- "jialc $t9, 0\n" +
+ "balc 2f\n" +
RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
"2:\n"
- "lapc $t9, 1b\n"
- "jialc $t9, 0\n";
- DriverStr(expected, "Jialc");
+ "balc 1b\n";
+ DriverStr(expected, "Balc");
}
-TEST_F(AssemblerMIPS64Test, LongJialc) {
+TEST_F(AssemblerMIPS64Test, LongBalc) {
+ constexpr uint32_t kNopCount1 = (1u << 25) + 1;
+ constexpr uint32_t kNopCount2 = (1u << 25) + 1;
+ constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
+ ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
+ __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
mips64::Mips64Label label1, label2;
- __ Jialc(&label1, mips64::T9);
- constexpr uint32_t kAdduCount1 = (1u << 18) + 1;
- for (uint32_t i = 0; i != kAdduCount1; ++i) {
- __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ __ Balc(&label1);
+ for (uint32_t i = 0; i != kNopCount1; ++i) {
+ __ Nop();
}
__ Bind(&label1);
- __ Jialc(&label2, mips64::T9);
- constexpr uint32_t kAdduCount2 = (1u << 18) + 1;
- for (uint32_t i = 0; i != kAdduCount2; ++i) {
- __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ __ Balc(&label2);
+ for (uint32_t i = 0; i != kNopCount2; ++i) {
+ __ Nop();
}
__ Bind(&label2);
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
- uint32_t offset_forward1 = 3 + kAdduCount1; // 3: account for auipc, daddiu and jic.
+ uint32_t offset_forward1 = 2 + kNopCount1; // 2: account for auipc and jialc.
offset_forward1 <<= 2;
- offset_forward1 += (offset_forward1 & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_forward1 += (offset_forward1 & 0x8000) << 1; // Account for sign extension in jialc.
- uint32_t offset_forward2 = 3 + kAdduCount2; // 3: account for auipc, daddiu and jic.
+ uint32_t offset_forward2 = 2 + kNopCount2; // 2: account for auipc and jialc.
offset_forward2 <<= 2;
- offset_forward2 += (offset_forward2 & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_forward2 += (offset_forward2 & 0x8000) << 1; // Account for sign extension in jialc.
- uint32_t offset_back = -(3 + kAdduCount2); // 3: account for auipc, daddiu and jic.
+ uint32_t offset_back = -(2 + kNopCount2); // 2: account for auipc and jialc.
offset_back <<= 2;
- offset_back += (offset_back & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_back += (offset_back & 0x8000) << 1; // Account for sign extension in jialc.
+ // Note that we're using the ".fill" directive to tell the assembler to generate many NOPs
+ // instead of generating them ourselves in the source code. This saves a few minutes
+ // of test time.
std::ostringstream oss;
oss <<
".set noreorder\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
- "jialc $t9, 0\n" <<
- RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
+ "auipc $at, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
+ ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
"1:\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
- "jialc $t9, 0\n" <<
- RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
+ "auipc $at, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
+ ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
"2:\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_back) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_back) << "\n"
- "jialc $t9, 0\n";
+ "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_back) << "\n";
std::string expected = oss.str();
- DriverStr(expected, "LongJialc");
+ DriverStr(expected, "LongBalc");
}
TEST_F(AssemblerMIPS64Test, Bc) {
@@ -827,6 +827,258 @@
// MISC //
//////////
+TEST_F(AssemblerMIPS64Test, Lwpc) {
+ // Lwpc() takes an unsigned 19-bit immediate, while the GNU assembler needs a signed offset,
+ // hence the sign extension from bit 18 with `imm - ((imm & 0x40000) << 1)`.
+ // The GNU assembler also wants the offset to be a multiple of 4, which it will shift right
+ // by 2 positions when encoding, hence `<< 2` to compensate for that shift.
+ // We capture the value of the immediate with `.set imm, {imm}` because the value is needed
+ // twice for the sign extension, but `{imm}` is substituted only once.
+ const char* code = ".set imm, {imm}\nlw ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwpc, 19, code), "Lwpc");
+}
+
+TEST_F(AssemblerMIPS64Test, Lwupc) {
+ // The comment for the Lwpc test applies here as well.
+ const char* code = ".set imm, {imm}\nlwu ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwupc, 19, code), "Lwupc");
+}
+
+TEST_F(AssemblerMIPS64Test, Ldpc) {
+ // The comment for the Lwpc test applies here as well.
+ const char* code = ".set imm, {imm}\nld ${reg}, ((imm - ((imm & 0x20000) << 1)) << 3)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Ldpc, 18, code), "Ldpc");
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLabelAddress) {
+ mips64::Mips64Label label;
+ __ LoadLabelAddress(mips64::V0, &label);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "lapc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n";
+ DriverStr(expected, "LoadFarthestNearLabelAddress");
+ EXPECT_EQ(__ GetLabelLocation(&label), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLabelAddress) {
+ mips64::Mips64Label label;
+ __ LoadLabelAddress(mips64::V0, &label);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "addiu $v0, $at, %lo(2f - 1b)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n";
+ DriverStr(expected, "LoadNearestFarLabelAddress");
+ EXPECT_EQ(__ GetLabelLocation(&label), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteral) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "lwpc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadFarthestNearLiteral");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteral) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "lw $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadNearestFarLiteral");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralUnsigned) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "lwupc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadFarthestNearLiteralUnsigned");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralUnsigned) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "lwu $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadNearestFarLiteralUnsigned");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralLong) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDD;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "ldpc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "LoadFarthestNearLiteralLong");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralLong) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "ld $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "LoadNearestFarLiteralLong");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNop) {
+ mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
+ mips64::Literal* literal3 = __ NewLiteral<uint64_t>(UINT64_C(0xAAAAAAAAAAAAAAAA));
+ __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
+ __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
+ __ LoadLiteral(mips64::A3, mips64::kLoadDoubleword, literal3);
+ __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
+ __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
+ // A nop will be inserted here before the 64-bit literals.
+
+ std::string expected =
+ "ldpc $a1, 1f\n"
+ // The GNU assembler incorrectly requires the ldpc instruction to be located
+ // at an address that's a multiple of 8. TODO: Remove this workaround if/when
+ // the assembler is fixed.
+ // "ldpc $a2, 2f\n"
+ ".word 0xECD80004\n"
+ "ldpc $a3, 3f\n"
+ "lapc $v0, 1f\n"
+ "lapc $v1, 2f\n"
+ "nop\n"
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n"
+ "2:\n"
+ ".dword 0x5555555555555555\n"
+ "3:\n"
+ ".dword 0xAAAAAAAAAAAAAAAA\n";
+ DriverStr(expected, "LongLiteralAlignmentNop");
+ EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 6 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 8 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal3->GetLabel()), 10 * 4u);
+}
+
+TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNoNop) {
+ mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
+ __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
+ __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
+ __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
+ __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
+
+ std::string expected =
+ "ldpc $a1, 1f\n"
+ // The GNU assembler incorrectly requires the ldpc instruction to be located
+ // at an address that's a multiple of 8. TODO: Remove this workaround if/when
+ // the assembler is fixed.
+ // "ldpc $a2, 2f\n"
+ ".word 0xECD80003\n"
+ "lapc $v0, 1f\n"
+ "lapc $v1, 2f\n"
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n"
+ "2:\n"
+ ".dword 0x5555555555555555\n";
+ DriverStr(expected, "LongLiteralAlignmentNoNop");
+ EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 4 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 6 * 4u);
+}
+
+TEST_F(AssemblerMIPS64Test, FarLongLiteralAlignmentNop) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ __ LoadLabelAddress(mips64::V1, literal->GetLabel());
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ // A nop will be inserted here before the 64-bit literal.
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(3f - 1b)\n"
+ "ld $v0, %lo(3f - 1b)($at)\n"
+ "2:\n"
+ "auipc $at, %hi(3f - 2b)\n"
+ "addiu $v1, $at, %lo(3f - 2b)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "nop\n"
+ "3:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "FarLongLiteralAlignmentNop");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (5 + kAdduCount) * 4);
+}
+
TEST_F(AssemblerMIPS64Test, Bitswap) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
}
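
The `.set imm, ...` expressions in the new tests sign-extend an immediate by hand inside the GNU assembler. The same trick in C++, for the 19-bit field used by Lwpc/Lwupc:

    #include <cassert>
    #include <cstdint>

    // Sign-extend a 19-bit field: subtracting twice the sign bit flips its
    // contribution from +0x40000 to -0x40000.
    int32_t SignExtend19(uint32_t imm19) {
      return static_cast<int32_t>(imm19) -
             (static_cast<int32_t>(imm19 & 0x40000u) << 1);
    }

    int main() {
      assert(SignExtend19(0x00000u) == 0);
      assert(SignExtend19(0x7FFFFu) == -1);        // All 19 bits set.
      assert(SignExtend19(0x40000u) == -0x40000);  // Sign bit only.
      return 0;
    }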
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 4787395..c82600b 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -22,6 +22,8 @@
#include "android-base/logging.h"
#include "android-base/stringprintf.h"
+#include "base/bit_utils.h"
+
using android::base::StringPrintf;
namespace art {
@@ -154,6 +156,7 @@
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift), "ext", "TSAZ", },
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", },
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 4, "ins", "TSAz", },
+ { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 6, "dinsu", "TSFz", },
{ kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
(31 << kOpcodeShift) | (16 << 6) | 32,
"seb",
@@ -218,8 +221,8 @@
{ kITypeMask, 12 << kOpcodeShift, "andi", "TSi", },
{ kITypeMask, 13 << kOpcodeShift, "ori", "TSi", },
{ kITypeMask, 14 << kOpcodeShift, "xori", "TSi", },
- { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "TI", },
- { kITypeMask, 15 << kOpcodeShift, "aui", "TSI", },
+ { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "Ti", },
+ { kITypeMask, 15 << kOpcodeShift, "aui", "TSi", },
{ kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21), "bc1f", "cB" },
{ kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21) | (1 << 16), "bc1t", "cB" },
@@ -335,6 +338,8 @@
{ kITypeMask | (0x1f << 16), (59u << kOpcodeShift) | (30 << 16), "auipc", "Si" },
{ kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (0 << 19), "addiupc", "Sp" },
{ kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (1 << 19), "lwpc", "So" },
+ { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (2 << 19), "lwupc", "So" },
+ { kITypeMask | (0x7 << 18), (59u << kOpcodeShift) | (6 << 18), "ldpc", "S0" },
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
{ kITypeMask | (0x1f << 21), 62u << kOpcodeShift, "jialc", "Ti" },
{ kITypeMask | (1 << 21), (62u << kOpcodeShift) | (1 << 21), "bnezc", "Sb" }, // TODO: de-dup?
@@ -468,6 +473,7 @@
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
case 'a': args << 'f' << sa; break;
+ case 'F': args << (sa + 32); break; // dinsu position.
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
@@ -481,9 +487,6 @@
}
continue; // No ", ".
}
- case 'I': // Upper 16-bit immediate.
- args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
- break;
case 'i': // Sign-extended lower 16-bit immediate.
args << static_cast<int16_t>(instruction & 0xffff);
break;
@@ -512,7 +515,7 @@
}
}
break;
- case 'o': // 19-bit offset in lwpc.
+ case 'o': // 19-bit offset in lwpc and lwupc.
{
int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
offset <<= 2;
@@ -520,6 +523,15 @@
args << StringPrintf(" ; %+d", offset);
}
break;
+ case '0': // 18-bit offset in ldpc.
+ {
+ int32_t offset = (instruction & 0x3ffff) - ((instruction & 0x20000) << 1);
+ offset <<= 3;
+ uintptr_t ptr = RoundDown(reinterpret_cast<uintptr_t>(instr_ptr), 8);
+ args << FormatInstructionPointer(reinterpret_cast<const uint8_t*>(ptr + offset));
+ args << StringPrintf(" ; %+d", offset);
+ }
+ break;
case 'P': // 26-bit offset in bc and balc.
{
int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
@@ -541,7 +553,7 @@
case 'T': args << 'r' << rt; break;
case 't': args << 'f' << rt; break;
case 'Z': args << (rd + 1); break; // sz ([d]ext size).
- case 'z': args << (rd - sa + 1); break; // sz ([d]ins size).
+ case 'z': args << (rd - sa + 1); break; // sz ([d]ins, dinsu size).
}
if (*(args_fmt + 1)) {
args << ", ";
@@ -551,17 +563,14 @@
}
}
- // TODO: Simplify this once these sequences are simplified in the compiler.
// Special cases for sequences of:
// pc-relative +/- 2GB branch:
// auipc reg, imm
// jic reg, imm
// pc-relative +/- 2GB branch and link:
// auipc reg, imm
- // daddiu reg, reg, imm
- // jialc reg, 0
- if (((op == 0x36 && rs == 0 && rt != 0) || // jic
- (op == 0x19 && rs == rt && rt != 0)) && // daddiu
+ // jialc reg, imm
+ if (((op == 0x36 || op == 0x3E) && rs == 0 && rt != 0) && // ji[al]c
last_ptr_ && (intptr_t)instr_ptr - (intptr_t)last_ptr_ == 4 &&
(last_instr_ & 0xFC1F0000) == 0xEC1E0000 && // auipc
((last_instr_ >> 21) & 0x1F) == rt) {
@@ -569,9 +578,9 @@
offset -= (offset & 0x8000) << 1;
offset -= 4;
if (op == 0x36) {
- args << " ; b ";
+ args << " ; bc ";
} else {
- args << " ; move r" << rt << ", ";
+ args << " ; balc ";
}
args << FormatInstructionPointer(instr_ptr + (int32_t)offset);
args << StringPrintf(" ; %+d", (int32_t)offset);
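
The new case '0' decoding, restated compactly under the assumption that `instr_addr` is the address of the ldpc instruction itself:

    #include <cstdint>

    // Decode the ldpc target: sign-extend the 18-bit field, scale by 8, and
    // add it to the instruction address rounded down to a multiple of 8.
    uintptr_t LdpcTarget(uintptr_t instr_addr, uint32_t instruction) {
      int32_t offset = static_cast<int32_t>(instruction & 0x3FFFFu) -
                       (static_cast<int32_t>(instruction & 0x20000u) << 1);
      offset *= 8;  // Same as << 3, but well-defined for negative values.
      return (instr_addr & ~static_cast<uintptr_t>(7)) + offset;
    }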
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index ef03bb3..c78db3e 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -134,8 +134,7 @@
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
- ->GetDexCache()->NumResolvedMethods());
+ GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
pointer_size);
@@ -154,8 +153,7 @@
// NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
// without accessing the DexCache and we don't want to do that in release build.
DCHECK_LT(method_index,
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()
- ->GetDexCache()->NumResolvedMethods());
+ GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
method_index,
@@ -186,8 +184,7 @@
inline mirror::Class* ArtMethod::GetDexCacheResolvedType(dex::TypeIndex type_index,
PointerSize pointer_size) {
if (kWithCheck) {
- mirror::DexCache* dex_cache =
- GetInterfaceMethodIfProxy(pointer_size)->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache = GetInterfaceMethodIfProxy(pointer_size)->GetDexCache();
if (UNLIKELY(type_index.index_ >= dex_cache->NumResolvedTypes())) {
ThrowArrayIndexOutOfBoundsException(type_index.index_, dex_cache->NumResolvedTypes());
return nullptr;
@@ -333,7 +330,7 @@
}
inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
- return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
+ return GetDexFile()->GetCodeItem(GetCodeItemOffset());
}
inline bool ArtMethod::IsResolvedTypeIdx(dex::TypeIndex type_idx, PointerSize pointer_size) {
@@ -398,11 +395,20 @@
}
inline mirror::DexCache* ArtMethod::GetDexCache() {
- DCHECK(!IsProxyMethod());
- if (UNLIKELY(IsObsolete())) {
- return GetObsoleteDexCache();
- } else {
+ if (LIKELY(!IsObsolete())) {
return GetDeclaringClass()->GetDexCache();
+ } else {
+ DCHECK(!IsProxyMethod());
+ return GetObsoleteDexCache();
+ }
+}
+
+inline mirror::StringDexCacheType* ArtMethod::GetDexCacheStrings() {
+ if (LIKELY(!IsObsolete())) {
+ return GetDeclaringClass()->GetDexCacheStrings();
+ } else {
+ DCHECK(!IsProxyMethod());
+ return GetObsoleteDexCache()->GetStrings();
}
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 3bc6f5d..83ed1db 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -27,6 +27,7 @@
#include "invoke_type.h"
#include "method_reference.h"
#include "modifiers.h"
+#include "mirror/dex_cache.h"
#include "mirror/object.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
@@ -220,6 +221,12 @@
return !IsIntrinsic() && (GetAccessFlags() & kAccObsoleteMethod) != 0;
}
+ void SetIsObsolete() {
+ // TODO: We should really support redefining intrinsics if possible.
+ DCHECK(!IsIntrinsic());
+ SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsNative() {
return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
@@ -326,6 +333,10 @@
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+
+ ALWAYS_INLINE mirror::StringDexCacheType* GetDexCacheStrings()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
PointerSize pointer_size)
diff --git a/runtime/cha.cc b/runtime/cha.cc
index be675a8..d94b091 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -100,7 +100,11 @@
bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
- if (method == nullptr || method->IsRuntimeMethod() || method->IsNative()) {
+ // Skip the kinds of methods that do not have an oat quick method header.
+ if (method == nullptr ||
+ method->IsRuntimeMethod() ||
+ method->IsNative() ||
+ method->IsProxyMethod()) {
return true;
}
if (GetCurrentQuickFrame() == nullptr) {
@@ -110,6 +114,7 @@
// Method may have multiple versions of compiled code. Check
// the method header to see if it has should_deoptimize flag.
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ DCHECK(method_header != nullptr);
if (!method_header->HasShouldDeoptimizeFlag()) {
// This compiled version doesn't have should_deoptimize flag. Skip.
return true;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 0a65cd1..213986a 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -25,6 +25,7 @@
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/iftable.h"
+#include "mirror/throwable.h"
#include "mirror/object_array.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -89,17 +90,28 @@
inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
+ if (kIsDebugBuild) {
+ Thread::Current()->AssertNoPendingException();
+ }
ObjPtr<mirror::Class> resolved_type =
referrer->GetDexCacheResolvedType(type_idx, image_pointer_size_);
if (UNLIKELY(resolved_type == nullptr)) {
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
+ // There could be an out-of-bounds exception from GetDexCacheResolvedType; don't call
+ // ResolveType in this case.
+ if (LIKELY(!hs.Self()->IsExceptionPending())) {
+ ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
+ } else {
+ // Make sure it's an array index out of bounds exception.
+ DCHECK(hs.Self()->GetException()->GetClass()->DescriptorEquals(
+ "Ljava/lang/ArrayIndexOutOfBoundsException;"));
+ }
}
return resolved_type.Ptr();
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f6eeffc..14c9c21 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -563,7 +563,7 @@
HandleWrapperObjPtr<mirror::Object> h_this(hs2.NewHandleWrapper(this_object));
Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass()));
const dex::TypeIndex method_type_idx =
- h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
mirror::Class* method_reference_class = class_linker->ResolveType(method_type_idx, referrer);
if (UNLIKELY(method_reference_class == nullptr)) {
// Bad type idx.
@@ -673,8 +673,7 @@
size_t expected_size) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ArtField* resolved_field =
- referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx,
- kRuntimePointerSize);
+ referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_field == nullptr)) {
return nullptr;
}
@@ -733,7 +732,7 @@
}
mirror::Class* referring_class = referrer->GetDeclaringClass();
ArtMethod* resolved_method =
- referring_class->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
+ referrer->GetDexCache()->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (UNLIKELY(resolved_method == nullptr)) {
return nullptr;
}
@@ -759,9 +758,9 @@
} else if (type == kSuper) {
// TODO This lookup is rather slow.
dex::TypeIndex method_type_idx =
- referring_class->GetDexFile().GetMethodId(method_idx).class_idx_;
+ referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
mirror::Class* method_reference_class =
- referring_class->GetDexCache()->GetResolvedType(method_type_idx);
+ referrer->GetDexCache()->GetResolvedType(method_type_idx);
if (method_reference_class == nullptr) {
// Need to do full type resolution...
return nullptr;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 2cde7d5..081be96 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -75,11 +75,10 @@
MutexLock mu(self, *Locks::reference_processor_lock_);
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
- mirror::HeapReference<mirror::Object>* const referent_addr =
- reference->GetReferentReferenceAddr();
+ ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
// If the referent became cleared, return it. Don't need barrier since thread roots can't get
// updated until after we leave the function due to holding the mutator lock.
- if (referent_addr->AsMirrorPtr() == nullptr) {
+ if (referent == nullptr) {
return nullptr;
}
// Try to see if the referent is already marked by using the is_marked_callback. We can return
@@ -91,10 +90,15 @@
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (collector_->IsMarkedHeapReference(referent_addr)) {
+ // Use the cached referent instead of calling GetReferent since other threads could call
+ // Reference.clear() after we did the null check resulting in a null pointer being
+ // incorrectly passed to IsMarked. b/33569625
+ ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
+ if (forwarded_ref != nullptr) {
+ // Non-null means that it is marked.
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
- return referent_addr->AsMirrorPtr();
+ return forwarded_ref;
}
}
}
@@ -265,11 +269,23 @@
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(
- ObjPtr<mirror::FinalizerReference> reference) {
+void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
- // Wait untul we are done processing reference.
+ // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
+ // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
+ // This also handles the race where the referent gets cleared after a null check but before
+ // IsMarkedHeapReference is called.
+ WaitUntilDoneProcessingReferences(self);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->ClearReferent<true>();
+ } else {
+ ref->ClearReferent<false>();
+ }
+}
+
+void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
+ // Wait until we are done processing references.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
@@ -277,6 +293,13 @@
self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
+}
+
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::reference_processor_lock_);
+ WaitUntilDoneProcessingReferences(self);
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
// enqueued if we just finished processing references. Otherwise, we may be doing the main GC
// phase. Since we are holding the reference processor lock, it guarantees that reference
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 759b7e1..b15544d 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -73,6 +73,9 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
+ void ClearReferent(ObjPtr<mirror::Reference> ref)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::reference_processor_lock_);
private:
bool SlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -84,6 +87,10 @@
// referents.
void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
+ // Wait until reference processing is done.
+ void WaitUntilDoneProcessingReferences(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::reference_processor_lock_);
// Collector which is clearing references, used by the GetReferent to return referents which are
// already marked.
collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_);
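
The interleaving behind b/33569625 is easiest to see from the Java side. A minimal sketch, assuming nothing beyond java.lang.ref (the class name ClearRace and the thread shapes are illustrative, not part of this change): one thread clears a reference while another reads it, which is exactly the window in which the reference processor used to re-read a referent it had already null-checked. Test 153-reference-stress below exercises the same interaction at scale.

import java.lang.ref.WeakReference;

public class ClearRace {
    public static void main(String[] args) throws InterruptedException {
        final WeakReference<Object> ref = new WeakReference<>(new Object());
        // The reader models a mutator calling Reference.get() while reference
        // processing runs; the clearer models Reference.clear() racing with it.
        Thread clearer = new Thread(ref::clear);
        Thread reader = new Thread(() -> ref.get());
        clearer.start();
        reader.start();
        clearer.join();
        reader.join();
    }
}
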
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 870d1ae..3956af4 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -557,8 +557,10 @@
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
- if (interpreter_stubs_installed_) {
+ if (interpreter_stubs_installed_ && interpret_only_) {
return InstrumentationLevel::kInstrumentWithInterpreter;
+ } else if (interpreter_stubs_installed_) {
+ return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
} else if (entry_exit_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
@@ -566,6 +568,14 @@
}
}
+bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
+ // We need to reinstall instrumentation if we go to a different level, or if the requested level
+ // is kInstrumentWithInterpreterAndJit, since that level does not force all code to always run in
+ // the interpreter and so optimized code may have started running again.
+ return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
+ GetCurrentInstrumentationLevel() != new_level;
+}
+
void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
// Store the instrumentation level for this key or remove it.
if (desired_level == InstrumentationLevel::kInstrumentNothing) {
@@ -585,8 +595,7 @@
interpret_only_ = (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) ||
forced_interpret_only_;
- InstrumentationLevel current_level = GetCurrentInstrumentationLevel();
- if (requested_level == current_level) {
+ if (!RequiresInstrumentationInstallation(requested_level)) {
// We're already set.
return;
}
@@ -595,7 +604,7 @@
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
if (requested_level > InstrumentationLevel::kInstrumentNothing) {
- if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
+ if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
interpreter_stubs_installed_ = true;
entry_exit_stubs_installed_ = true;
} else {
@@ -842,7 +851,8 @@
void Instrumentation::DisableDeoptimization(const char* key) {
CHECK_EQ(deoptimization_enabled_, true);
// If we deoptimized everything, undo it.
- if (interpreter_stubs_installed_) {
+ InstrumentationLevel level = GetCurrentInstrumentationLevel();
+ if (level == InstrumentationLevel::kInstrumentWithInterpreter) {
UndeoptimizeEverything(key);
}
// Undeoptimized selected methods.
@@ -869,6 +879,14 @@
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
+// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
+// multiple users of instrumentation. Since this is just a temporary state anyway, pending work to
+// ensure that the current_method doesn't get kept across suspend points, this should be okay.
+// TODO Remove once b/33630159 is resolved.
+void Instrumentation::ReJitEverything(const char* key) {
+ ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
+}
+
void Instrumentation::DeoptimizeEverything(const char* key) {
CHECK(deoptimization_enabled_);
ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 1e5fcf2..872efac 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,6 +133,9 @@
enum class InstrumentationLevel {
kInstrumentNothing, // execute without instrumentation
kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
+ kInstrumentWithInterpreterAndJit, // execute with interpreter initially and later the JIT
+ // (if it is enabled). This level is special in that it
+ // always requires re-instrumentation.
kInstrumentWithInterpreter // execute with interpreter
};
@@ -163,6 +166,13 @@
}
bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Executes everything with the interpreter/jit (if available).
+ void ReJitEverything(const char* key)
+ REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
+ REQUIRES(!Locks::thread_list_lock_,
+ !Locks::classlinker_classes_lock_,
+ !deoptimized_methods_lock_);
+
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -435,6 +445,10 @@
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
+ // Returns true if moving to the given instrumentation level requires the installation of stubs.
+ // False otherwise.
+ bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;
+
// Does the job of installing or removing instrumentation code within methods.
// In order to support multiple clients using instrumentation at the same time,
// the caller must pass a unique key (a string) identifying it so we remind which
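
Note that ConfigureStubs above now compares levels with >=, so the position of kInstrumentWithInterpreterAndJit in this enum, between the stub-only and interpreter-only levels, is load-bearing: any requested level at or above it installs both the interpreter stubs and the entry/exit stubs. Reordering the enumerators would silently change which stubs get installed.
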
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index c9a5b44..02a99e7 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -249,17 +249,16 @@
}
}
ArtMethod* method = shadow_frame.GetMethod();
- ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
// MethodVerifier refuses methods with string_idx out of bounds.
DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
- declaring_class->GetDexFile().NumStringIds());
+ method->GetDexFile()->NumStringIds());
ObjPtr<mirror::String> string_ptr =
- mirror::StringDexCachePair::Lookup(declaring_class->GetDexCacheStrings(),
+ mirror::StringDexCachePair::Lookup(method->GetDexCacheStrings(),
string_idx.index_,
mirror::DexCache::kDexCacheStringCacheSize).Read();
if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
string_idx,
dex_cache);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1b0ad83..d7827ef 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -636,6 +636,38 @@
return CodeCacheSizeLocked();
}
+// This invalidates old_method. Once this function returns, one can no longer use old_method to
+// execute code unless it is fixed up. This fixup will happen later in the process of installing a
+// class redefinition.
+// TODO We should add some info to ArtMethod to note that 'old_method' has been invalidated and
+// shouldn't be used since it is no longer logically in the jit code cache.
+// TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
+void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
+ MutexLock mu(Thread::Current(), lock_);
+ // Update ProfilingInfo to the new one.
+ if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
+ DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
+ ProfilingInfo* info = old_method->GetProfilingInfo(kRuntimePointerSize);
+ // Since the JIT should be paused and all threads suspended by the time this is called, these
+ // checks should always pass.
+ DCHECK(!info->IsInUseByCompiler());
+ new_method->SetProfilingInfo(info);
+ info->method_ = new_method;
+ }
+ // Update method_code_map_ to point to the new method.
+ for (auto& it : method_code_map_) {
+ if (it.second == old_method) {
+ it.second = new_method;
+ }
+ }
+ // Update osr_code_map_ to point to the new method.
+ auto code_map = osr_code_map_.find(old_method);
+ if (code_map != osr_code_map_.end()) {
+ osr_code_map_.Put(new_method, code_map->second);
+ osr_code_map_.erase(old_method);
+ }
+}
+
size_t JitCodeCache::CodeCacheSizeLocked() {
return used_memory_for_code_;
}
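
Note that MoveObsoleteMethod updates both directions of the method <-> ProfilingInfo link under the same lock_: new_method->SetProfilingInfo(info) and info->method_ = new_method. The profiling_info.h change below drops the 'const' on method_ precisely so that this back-pointer can be rewritten.
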
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 30e2efb..5bfe661 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -217,6 +217,11 @@
void DisallowInlineCacheAccess() REQUIRES(!lock_);
void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+ // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
+ // 'new_method' since it is being made obsolete.
+ void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
+ REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 9902bb5..9fbf2e3 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -128,7 +128,9 @@
const uint32_t number_of_inline_caches_;
// Method this profiling info is for.
- ArtMethod* const method_;
+ // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
+ // See JitCodeCache::MoveObsoleteMethod.
+ ArtMethod* method_;
// Whether the ArtMethod is currently being compiled. This flag
// is implicitly guarded by the JIT code cache lock.
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index ec265e5..6f88cc5 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,7 +19,6 @@
#include "array.h"
#include "art_field.h"
-#include "art_method.h"
#include "class.h"
#include "dex_file_types.h"
#include "object.h"
@@ -27,6 +26,7 @@
namespace art {
+class ArtMethod;
struct DexCacheOffsets;
class DexFile;
class ImageWriter;
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index bedca10..3f4573d 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -33,8 +33,15 @@
return soa.AddLocalReference<jobject>(referent);
}
+static void Reference_clear(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->ClearReferent(ref);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Reference, getReferent, "!()Ljava/lang/Object;"),
+ NATIVE_METHOD(Reference, clear, "!()V"),
};
void register_java_lang_ref_Reference(JNIEnv* env) {
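
With clear() now backed by a native method, its user-visible contract is unchanged; the new path only serializes the clear against in-progress reference processing. A minimal sketch of the contract ClearReferent must preserve (the class name is an assumption for illustration):

import java.lang.ref.WeakReference;

public class ClearContract {
    public static void main(String[] args) {
        WeakReference<Object> r = new WeakReference<>(new Object());
        r.clear();  // Routed through ReferenceProcessor::ClearReferent, which waits
                    // for reference processing to finish before clearing the referent.
        System.out.println(r.get() == null);  // Always true: the clear must be visible.
    }
}
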
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index d0349b9..429409a 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -38,6 +38,8 @@
#include "events-inl.h"
#include "gc/allocation_listener.h"
#include "instrumentation.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
#include "mirror/class.h"
@@ -49,6 +51,142 @@
namespace openjdkjvmti {
+// This visitor walks thread stacks, allocating and setting up the obsolete methods. It also
+// performs some basic sanity checks on each obsolete method.
+class ObsoleteMethodStackVisitor : public art::StackVisitor {
+ protected:
+ ObsoleteMethodStackVisitor(
+ art::Thread* thread,
+ art::LinearAlloc* allocator,
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
+ /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
+ /*out*/bool* success,
+ /*out*/std::string* error_msg)
+ : StackVisitor(thread,
+ /*context*/nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ allocator_(allocator),
+ obsoleted_methods_(obsoleted_methods),
+ obsolete_maps_(obsolete_maps),
+ success_(success),
+ is_runtime_frame_(false),
+ error_msg_(error_msg) {}
+
+ ~ObsoleteMethodStackVisitor() OVERRIDE {}
+
+ public:
+ // Returns true if we successfully installed obsolete methods on this thread, filling
+ // obsolete_maps_ with the translations if needed. Returns false and fills error_msg_ if we fail.
+ // The stack is cleaned up when we fail.
+ static bool UpdateObsoleteFrames(
+ art::Thread* thread,
+ art::LinearAlloc* allocator,
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
+ /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps,
+ /*out*/std::string* error_msg) REQUIRES(art::Locks::mutator_lock_) {
+ bool success = true;
+ ObsoleteMethodStackVisitor visitor(thread,
+ allocator,
+ obsoleted_methods,
+ obsolete_maps,
+ &success,
+ error_msg);
+ visitor.WalkStack();
+ if (!success) {
+ RestoreFrames(thread, *obsolete_maps);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ static void RestoreFrames(
+ art::Thread* thread ATTRIBUTE_UNUSED,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsolete_maps ATTRIBUTE_UNUSED)
+ REQUIRES(art::Locks::mutator_lock_) {
+ LOG(FATAL) << "Restoring stack frames is not yet supported.";
+ }
+
+ bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ art::ArtMethod* old_method = GetMethod();
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ bool prev_was_runtime_frame_ = is_runtime_frame_;
+ is_runtime_frame_ = old_method->IsRuntimeMethod();
+ if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
+ // This works since, when we deoptimize, we set shadow frames for all frames until a
+ // native/runtime transition and for those set the return PC to a function that will complete
+ // the deoptimization. This does leave us with the unfortunate side-effect that frames just
+ // below runtime frames cannot be deoptimized at the moment.
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ // TODO b/33616143
+ if (!IsShadowFrame() && prev_was_runtime_frame_) {
+ *error_msg_ = art::StringPrintf("Deoptimization failed due to a runtime method on the stack.");
+ *success_ = false;
+ return false;
+ }
+ // We cannot ensure that the right dex file is used in inlined frames so we don't support
+ // redefining them.
+ DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
+ // TODO We should really support intrinsic obsolete methods.
+ // TODO We should really support redefining intrinsics.
+ // We don't support intrinsics, so check for them here.
+ DCHECK(!old_method->IsIntrinsic());
+ art::ArtMethod* new_obsolete_method = nullptr;
+ auto obsolete_method_pair = obsolete_maps_->find(old_method);
+ if (obsolete_method_pair == obsolete_maps_->end()) {
+ // Create a new Obsolete Method and put it in the list.
+ art::Runtime* runtime = art::Runtime::Current();
+ art::ClassLinker* cl = runtime->GetClassLinker();
+ auto ptr_size = cl->GetImagePointerSize();
+ const size_t method_size = art::ArtMethod::Size(ptr_size);
+ auto* method_storage = allocator_->Alloc(GetThread(), method_size);
+ if (method_storage == nullptr) {
+ *success_ = false;
+ *error_msg_ = art::StringPrintf("Unable to allocate storage for obsolete version of '%s'",
+ old_method->PrettyMethod().c_str());
+ return false;
+ }
+ new_obsolete_method = new (method_storage) art::ArtMethod();
+ new_obsolete_method->CopyFrom(old_method, ptr_size);
+ new_obsolete_method->SetIsObsolete();
+ obsolete_maps_->insert({old_method, new_obsolete_method});
+ // Update JIT Data structures to point to the new method.
+ art::jit::Jit* jit = art::Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsMethod(old_method)) {
+ // Notify the JIT that we are making this method obsolete. It will update its maps and
+ // switch the entrypoint over.
+ jit->GetCodeCache()->MoveObsoleteMethod(old_method, new_obsolete_method);
+ }
+ DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
+ } else {
+ new_obsolete_method = obsolete_method_pair->second;
+ }
+ DCHECK(new_obsolete_method != nullptr);
+ SetMethod(new_obsolete_method);
+ }
+ *success_ = true;
+ return true;
+ }
+
+ private:
+ // The linear allocator we should use to make new methods.
+ art::LinearAlloc* allocator_;
+ // The set of all methods which could be obsoleted.
+ const std::unordered_set<art::ArtMethod*>& obsoleted_methods_;
+ // A map from the original to the newly allocated obsolete method for frames on this thread. The
+ // values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
+ // the redefined class's ClassExt by the caller.
+ std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
+ bool* success_;
+ // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
+ // works through runtime methods.
+ bool is_runtime_frame_;
+ std::string* error_msg_;
+};
+
+
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
jint data_len,
@@ -72,6 +210,8 @@
return map;
}
+// TODO This should handle doing multiple classes at once, so that less cleanup is needed when
+// things go wrong.
jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
art::Runtime* runtime,
art::Thread* self,
@@ -112,6 +252,8 @@
*error_msg = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
+ // Stop the JIT for the duration of this redefinition.
+ art::jit::ScopedJitSuspend suspend_jit;
// Get shared mutator lock.
art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
@@ -292,6 +434,109 @@
return true;
}
+struct CallbackCtx {
+ Redefiner* const r;
+ art::LinearAlloc* allocator;
+ std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
+ std::unordered_set<art::ArtMethod*> obsolete_methods;
+ bool success;
+ std::string* error_msg;
+
+ CallbackCtx(Redefiner* self, art::LinearAlloc* alloc, std::string* error)
+ : r(self), allocator(alloc), success(true), error_msg(error) {}
+};
+
+void DoRestoreObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
+ CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
+ ObsoleteMethodStackVisitor::RestoreFrames(t, data->obsolete_map);
+}
+
+void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
+ CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata);
+ if (data->success) {
+ // Don't do anything if we already failed once.
+ data->success = ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t,
+ data->allocator,
+ data->obsolete_methods,
+ &data->obsolete_map,
+ data->error_msg);
+ }
+}
+
+void Redefiner::AddAllDeclaredMethods(
+ art::mirror::Class* art_klass,
+ art::PointerSize ptr_size,
+ /*out*/std::unordered_set<art::ArtMethod*>* declared_methods) {
+ for (auto& m : art_klass->GetDeclaredMethods(ptr_size)) {
+ declared_methods->insert(&m);
+ }
+}
+
+bool Redefiner::AllocateObsoleteMethods(art::mirror::Class* art_klass) {
+ art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
+ art::mirror::ClassExt* ext = art_klass->GetExtData();
+ CHECK(ext->GetObsoleteMethods() != nullptr);
+ CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator(), error_msg_);
+ AddAllDeclaredMethods(art_klass, art::kRuntimePointerSize, &ctx.obsolete_methods);
+ for (art::ArtMethod* old_method : ctx.obsolete_methods) {
+ if (old_method->IsIntrinsic()) {
+ *error_msg_ = art::StringPrintf("Method '%s' is intrinsic and cannot be made obsolete!",
+ old_method->PrettyMethod().c_str());
+ return false;
+ }
+ }
+ {
+ art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
+ art::ThreadList* list = art::Runtime::Current()->GetThreadList();
+ list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
+ if (!ctx.success) {
+ list->ForEach(DoRestoreObsoleteMethodsCallback, static_cast<void*>(&ctx));
+ return false;
+ }
+ }
+ FillObsoleteMethodMap(art_klass, ctx.obsolete_map);
+ return true;
+}
+
+void Redefiner::FillObsoleteMethodMap(
+ art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
+ int32_t index = 0;
+ art::mirror::ClassExt* ext_data = art_klass->GetExtData();
+ art::mirror::PointerArray* obsolete_methods = ext_data->GetObsoleteMethods();
+ art::mirror::ObjectArray<art::mirror::DexCache>* obsolete_dex_caches =
+ ext_data->GetObsoleteDexCaches();
+ int32_t num_method_slots = obsolete_methods->GetLength();
+ // Find the first empty index.
+ for (; index < num_method_slots; index++) {
+ if (obsolete_methods->GetElementPtrSize<art::ArtMethod*>(
+ index, art::kRuntimePointerSize) == nullptr) {
+ break;
+ }
+ }
+ // Make sure we have enough space.
+ CHECK_GT(num_method_slots, static_cast<int32_t>(obsoletes.size() + index));
+ CHECK(obsolete_dex_caches->Get(index) == nullptr);
+ // Fill in the map.
+ for (auto& obs : obsoletes) {
+ obsolete_methods->SetElementPtrSize(index, obs.second, art::kRuntimePointerSize);
+ obsolete_dex_caches->Set(index, art_klass->GetDexCache());
+ index++;
+ }
+}
+
+// TODO It should be possible to only deoptimize the specific obsolete methods.
+// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
+// If one of these frames is an obsolete method we have a problem. b/33616143
+// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
+// registers across suspend points.
+// TODO Pending b/33630159
+void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
+ art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
+ art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
+ i->ReJitEverything("libOpenJdkJvmti - Class Redefinition");
+}
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<5> hs(self_);
// TODO We might want to have a global lock (or one based on the class being redefined at least)
@@ -341,7 +586,8 @@
art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr);
if (!UpdateJavaDexFile(java_dex_file.Get(),
new_dex_file_cookie.Get(),
- &original_dex_file_cookie)) {
+ &original_dex_file_cookie) ||
+ !AllocateObsoleteMethods(art_class.Get())) {
// Release suspendAll
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
@@ -357,18 +603,20 @@
self_->TransitionFromSuspendedToRunnable();
return result_;
}
- // Update the ClassObjects Keep the old DexCache (and other stuff) around so we can restore
- // functions/fields.
- // Verify the new Class.
- // Failure then undo updates to class
- // Do stack walks and allocate obsolete methods
- // Shrink the obsolete method maps if possible?
- // TODO find appropriate class loader. Allocate new dex files array. Pause all java treads.
- // Replace dex files array. Do stack scan + allocate obsoletes. Remove array if possible.
- // TODO We might want to ensure that all threads are stopped for this!
- // AddDexToClassPath();
- // TODO
- // Release suspendAll
+ // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
+ // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
+ // DexCache.
+ // TODO This can fail (leave some methods optimized) near runtime methods (including
+ // quick-to-interpreter transition function).
+ // TODO Mingyao@ suggested we could maybe just do a retry loop instead of fixing the above.
+ // TODO We probably don't need this at all once we have a way to ensure that the
+ // current_art_method is never stashed in a (physical) register by the JIT and lost to the
+ // stack-walker.
+ EnsureObsoleteMethodsAreDeoptimized();
+ // TODO Verify the new Class.
+ // TODO Failure then undo updates to class
+ // TODO Shrink the obsolete method maps if possible?
+ // TODO find appropriate class loader.
// TODO Put this into a scoped thing.
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
@@ -421,19 +669,23 @@
}
const art::DexFile::ProtoId* proto_id = dex_file_->FindProtoId(method_return_idx,
new_type_list);
- CHECK(proto_id != nullptr || old_type_list == nullptr);
// TODO Return false, cleanup.
+ CHECK(proto_id != nullptr || old_type_list == nullptr);
const art::DexFile::MethodId* method_id = dex_file_->FindMethodId(declaring_class_id,
*new_name_id,
*proto_id);
- CHECK(method_id != nullptr);
// TODO Return false, cleanup.
+ CHECK(method_id != nullptr);
uint32_t dex_method_idx = dex_file_->GetIndexForMethodId(*method_id);
method.SetDexMethodIndex(dex_method_idx);
linker->SetEntryPointsToInterpreter(&method);
method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(*class_def, dex_method_idx));
method.SetDexCacheResolvedMethods(new_dex_cache->GetResolvedMethods(), image_pointer_size);
method.SetDexCacheResolvedTypes(new_dex_cache->GetResolvedTypes(), image_pointer_size);
+ if (!method.IsNative()) {
+ // Reset profiling info.
+ method.SetProfilingInfo(nullptr);
+ }
}
// Update the class fields.
// Need to update class last since the ArtMethod gets its DexFile from the class (which is needed
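
Reading Run() together with RedefineClass above, the intended ordering is roughly: suspend the JIT (ScopedJitSuspend), suspend all threads, UpdateJavaDexFile, AllocateObsoleteMethods (per-thread stack walks that swap each obsoleted ArtMethod for a freshly allocated obsolete copy, rolling the stacks back on failure), EnsureObsoleteMethodsAreDeoptimized via ReJitEverything, and finally resume all threads.
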
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index c819acd..2e3df0c 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -64,6 +64,8 @@
namespace openjdkjvmti {
// Class that can redefine a single class's methods.
+// TODO We should really make this driven by an outside class so we can do multiple classes at
+// the same time and have less required cleanup.
class Redefiner {
public:
// Redefine the given class with the given dex data. Note this function does not take ownership of
@@ -124,6 +126,15 @@
// in the future. For now we will just take the memory hit.
bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
+ // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
+ // DexCache.
+ // TODO Make this fallible
+ void EnsureObsoleteMethodsAreDeoptimized()
+ REQUIRES(art::Locks::mutator_lock_)
+ REQUIRES(!art::Locks::thread_list_lock_,
+ !art::Locks::classlinker_classes_lock_);
+
art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
// This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
@@ -162,6 +173,17 @@
bool UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache)
REQUIRES(art::Locks::mutator_lock_);
+
+ bool AllocateObsoleteMethods(art::mirror::Class* art_klass) REQUIRES(art::Locks::mutator_lock_);
+
+ void AddAllDeclaredMethods(art::mirror::Class* art_klass,
+ art::PointerSize ptr_size,
+ /*out*/std::unordered_set<art::ArtMethod*>* declared_methods)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void FillObsoleteMethodMap(art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
+ REQUIRES(art::Locks::mutator_lock_);
};
} // namespace openjdkjvmti
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 792da88..726ca2f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -614,6 +614,23 @@
return result;
}
+static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(Thread* thread,
+ uint32_t depth) {
+ CHECK_LT(depth, thread->GetInstrumentationStack()->size());
+ return thread->GetInstrumentationStack()->at(depth);
+}
+
+void StackVisitor::SetMethod(ArtMethod* method) {
+ DCHECK(GetMethod() != nullptr);
+ if (cur_shadow_frame_ != nullptr) {
+ cur_shadow_frame_->SetMethod(method);
+ } else {
+ DCHECK(cur_quick_frame_ != nullptr);
+ CHECK(!IsInInlinedFrame()) << "We do not support setting an inlined method's ArtMethod!";
+ *cur_quick_frame_ = method;
+ }
+}
+
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
@@ -775,8 +792,8 @@
void StackVisitor::WalkStack(bool include_transitions) {
DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
CHECK_EQ(cur_depth_, 0U);
+ size_t instrumentation_stack_depth = 0;
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
- uint32_t instrumentation_stack_depth = 0;
size_t inlined_frames_count = 0;
for (const ManagedStack* current_fragment = thread_->GetManagedStack();
@@ -839,7 +856,7 @@
if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
CHECK_LT(instrumentation_stack_depth, thread_->GetInstrumentationStack()->size());
const instrumentation::InstrumentationStackFrame& instrumentation_frame =
- thread_->GetInstrumentationStack()->at(instrumentation_stack_depth);
+ GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
instrumentation_stack_depth++;
if (GetMethod() ==
Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAllCalleeSaves)) {
diff --git a/runtime/stack.h b/runtime/stack.h
index b1e99e5..9dceb29 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -327,6 +327,12 @@
}
}
+ void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) {
+ DCHECK(method != nullptr);
+ DCHECK(method_ != nullptr);
+ method_ = method;
+ }
+
ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
@@ -610,6 +616,10 @@
ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Sets this stack frame's method pointer. This requires the mutator lock to be held
+ // exclusively. This doesn't work with inlined methods.
+ void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
+
ArtMethod* GetOuterMethod() const {
return *GetCurrentQuickFrame();
}
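
Worth noting: StackVisitor::SetMethod in stack.cc handles both frame kinds. Interpreted frames keep their ArtMethod in the ShadowFrame (via the new ShadowFrame::SetMethod above), while compiled frames store the ArtMethod* at the base of the quick frame, which is why the visitor can simply write through *cur_quick_frame_. Inlined frames have no slot of their own, hence the CHECK rejecting them.
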
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index f9bff23..3af7c01 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -345,8 +345,15 @@
// merely on no issues with linking (valid access flags, superclass and
// implemented interfaces). If the class at any point reached the IsResolved
// status, the requirement holds. This is guaranteed by RegTypeCache::ResolveClass.
- DCHECK(destination != nullptr && !destination->IsPrimitive());
- DCHECK(source != nullptr && !source->IsPrimitive());
+ DCHECK(destination != nullptr);
+ DCHECK(source != nullptr);
+
+ if (destination->IsPrimitive() || source->IsPrimitive()) {
+ // Primitive types are trivially non-assignable to anything else.
+ // We do not need to record trivial assignability, as it will
+ // not change across releases.
+ return;
+ }
if (destination == source ||
destination->IsObjectClass() ||
diff --git a/test/153-reference-stress/expected.txt b/test/153-reference-stress/expected.txt
new file mode 100644
index 0000000..7ef22e9
--- /dev/null
+++ b/test/153-reference-stress/expected.txt
@@ -0,0 +1 @@
+PASS
diff --git a/test/153-reference-stress/info.txt b/test/153-reference-stress/info.txt
new file mode 100644
index 0000000..6bc0040
--- /dev/null
+++ b/test/153-reference-stress/info.txt
@@ -0,0 +1 @@
+Tests java.lang.ref.Reference.get() and GC running in parallel.
diff --git a/test/153-reference-stress/src/Main.java b/test/153-reference-stress/src/Main.java
new file mode 100644
index 0000000..fc6f9cc
--- /dev/null
+++ b/test/153-reference-stress/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.WeakReference;
+
+public class Main {
+ static final int numWeakReferences = 16 * 1024;
+ static WeakReference[] weakReferences = new WeakReference[numWeakReferences];
+ static volatile boolean done = false;
+ static Object keepAlive;
+
+ public static void main(String[] args) throws Exception {
+ // Try to call Reference.get repeatedly while the GC is running.
+ Thread gcThread = new GcThread();
+ Thread[] readerThread = new ReaderThread[4];
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i] = new ReaderThread();
+ }
+ gcThread.start();
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i].start();
+ }
+ gcThread.join();
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i].join();
+ }
+ System.out.println("PASS");
+ }
+
+ static class GcThread extends Thread {
+ GcThread() {
+ Object temp = new Object();
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j] = new WeakReference(temp);
+ }
+ }
+ public void run() {
+ for (int i = 0; i < 1000; ++i) {
+ Object o = new Object();
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j] = new WeakReference(o);
+ }
+ }
+ done = true;
+ }
+ }
+
+ static class ReaderThread extends Thread {
+ public void run() {
+ while (!done) {
+ for (int j = 0; j < weakReferences.length; ++j) {
+ keepAlive = weakReferences[j].get();
+ }
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j].clear();
+ }
+ }
+ }
+ }
+}
diff --git a/test/616-cha-regression-proxy-method/expected.txt b/test/616-cha-regression-proxy-method/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-regression-proxy-method/info.txt b/test/616-cha-regression-proxy-method/info.txt
new file mode 100644
index 0000000..386a07f
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/info.txt
@@ -0,0 +1 @@
+Regression test for Class Hierarchy Analysis (CHA) on visiting proxy method frame.
diff --git a/test/616-cha-regression-proxy-method/src/Main.java b/test/616-cha-regression-proxy-method/src/Main.java
new file mode 100644
index 0000000..19c92be
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/src/Main.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+
+class Main1 {
+ void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+}
+
+class Main2 extends Main1 {
+ void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+class Proxied implements Runnable {
+ public void run() {
+ synchronized(Main.class) {
+ Main.sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization to be triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+}
+
+class MyInvocationHandler implements InvocationHandler {
+ private final Proxied proxied;
+
+ public MyInvocationHandler(Proxied proxied) {
+ this.proxied = proxied;
+ }
+
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return method.invoke(proxied, args);
+ }
+}
+
+public class Main {
+ static Main1 sMain1;
+ static Main1 sMain2;
+ static volatile boolean sOtherThreadStarted;
+
+ // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2(), which links in Main2, the live testOverride() frames on the
+ // stack should be deoptimized.
+ static void testOverride() {
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+ // Create a Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different if an inline cache were used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that overrides a method can invalidate compiled code.
+ // Also create a proxy method such that a proxy method's frame is visited
+ // during stack walking.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ // Create another thread that calls a proxy method.
+ new Thread() {
+ public void run() {
+ Runnable proxy = (Runnable)Proxy.newProxyInstance(
+ Proxied.class.getClassLoader(),
+ new Class[] { Runnable.class },
+ new MyInvocationHandler(new Proxied()));
+ proxy.run();
+ }
+ }.start();
+
+ ensureJitCompiled(Main.class, "testOverride");
+ // This will create Main2 instance in the middle of testOverride().
+ testOverride();
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+}
+
+// Put createMain2() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+}
diff --git a/test/616-cha/src/Main.java b/test/616-cha/src/Main.java
index 787318d..b617944 100644
--- a/test/616-cha/src/Main.java
+++ b/test/616-cha/src/Main.java
@@ -179,7 +179,7 @@
}
}
- // Test scanerios under which CHA-based devirtualization happens,
+ // Test scenarios under which CHA-based devirtualization happens,
// and class loading that overrides a method can invalidate compiled code.
// Also test pure non-overriding case, which is more for checking generated
// code form.
@@ -206,11 +206,6 @@
// sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
sMain1 = new Main1();
- // Loop enough to get testOverride() JITed.
- for (int i=0; i<100; i++) {
- testOverride(false, false, false);
- }
-
ensureJitCompiled(Main.class, "testOverride");
testOverride(false, false, true);
@@ -244,7 +239,7 @@
private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
}
-// Do it in another class to avoid class loading due to verifier.
+// Put createMain2() in another class to avoid class loading due to verifier.
class Dummy {
static Main1 createMain2() {
return new Main2();
diff --git a/test/630-safecast-array/expected.txt b/test/630-safecast-array/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/630-safecast-array/expected.txt
diff --git a/test/630-safecast-array/info.txt b/test/630-safecast-array/info.txt
new file mode 100644
index 0000000..e105167
--- /dev/null
+++ b/test/630-safecast-array/info.txt
@@ -0,0 +1,3 @@
+Regression test for vdex, which used to crash in AddAssignability
+(called by the dex2dex compiler) when it did not anticipate arrays of
+primitive type.
diff --git a/test/630-safecast-array/smali/Main.smali b/test/630-safecast-array/smali/Main.smali
new file mode 100644
index 0000000..a50f37c
--- /dev/null
+++ b/test/630-safecast-array/smali/Main.smali
@@ -0,0 +1,33 @@
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class LMain;
+.super Ljava/lang/Object;
+
+.method public static main([Ljava/lang/String;)V
+.registers 1
+ return-void
+.end method
+
+.method public static testPrimitiveDestination([Ljava/lang/String;)V
+.registers 1
+ check-cast p0, [B
+ return-void
+.end method
+
+.method public static testPrimitiveSource([B)V
+.registers 1
+ check-cast p0, [Ljava/lang/String;
+ return-void
+.end method
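
For readability, a rough Java analogue of the smali above; a sketch only (javac needs the extra cast through Object to accept it, and actually invoking either method would throw ClassCastException, whereas the test only needs the verifier and the dex2dex compiler to process the raw check-cast):

class MainAnalogue {
    static void testPrimitiveDestination(String[] p) {
        byte[] b = (byte[]) (Object) p;  // check-cast p0, [B
    }
    static void testPrimitiveSource(byte[] p) {
        String[] s = (String[]) (Object) p;  // check-cast p0, [Ljava/lang/String;
    }
}
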
diff --git a/test/914-hello-obsolescence/build b/test/914-hello-obsolescence/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/914-hello-obsolescence/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/914-hello-obsolescence/expected.txt b/test/914-hello-obsolescence/expected.txt
new file mode 100644
index 0000000..83efda1
--- /dev/null
+++ b/test/914-hello-obsolescence/expected.txt
@@ -0,0 +1,9 @@
+hello
+Not doing anything here
+goodbye
+hello
+transforming calling function
+goodbye
+Hello - Transformed
+Not doing anything here
+Goodbye - Transformed
diff --git a/test/914-hello-obsolescence/info.txt b/test/914-hello-obsolescence/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/914-hello-obsolescence/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/914-hello-obsolescence/run b/test/914-hello-obsolescence/run
new file mode 100755
index 0000000..b2f0b04
--- /dev/null
+++ b/test/914-hello-obsolescence/run
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+ if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+ else
+ other_args=""
+ fi
+fi
+
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=914-hello-obsolescence,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/914-hello-obsolescence/src/Main.java b/test/914-hello-obsolescence/src/Main.java
new file mode 100644
index 0000000..46266ef
--- /dev/null
+++ b/test/914-hello-obsolescence/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // r.run();
+ // System.out.println("Goodbye - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
+ "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
+ "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
+ "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
+ "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsAAAA7AAIA" +
+ "AgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
+ "AQAPAAAAAgAQ");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAYeAMMXgYWxoeSHAS9EWKCCtVRSAGpqZVQAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
+ "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
+ "LjEzAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICABMQCAQHc" +
+ "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
+ "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
+ "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA=");
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ t.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/914-hello-obsolescence/src/Transform.java b/test/914-hello-obsolescence/src/Transform.java
new file mode 100644
index 0000000..8cda6cd
--- /dev/null
+++ b/test/914-hello-obsolescence/src/Transform.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi(Runnable r) {
+ // Use a lowercase 'h' to make sure the string will have a different string id
+ // than the one in the transformation (the transformed code is the same except
+ // for the actual printed String, which was making the test inaccurately pass
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Hello" < "LTransform;" < "hello".
+ System.out.println("hello");
+ r.run();
+ System.out.println("goodbye");
+ }
+}
diff --git a/test/915-obsolete-2/build b/test/915-obsolete-2/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/915-obsolete-2/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/915-obsolete-2/expected.txt b/test/915-obsolete-2/expected.txt
new file mode 100644
index 0000000..04aff3a
--- /dev/null
+++ b/test/915-obsolete-2/expected.txt
@@ -0,0 +1,21 @@
+Pre Start private method call
+hello - private
+Post Start private method call
+Not doing anything here
+Pre Finish private method call
+goodbye - private
+Post Finish private method call
+Pre Start private method call
+hello - private
+Post Start private method call
+transforming calling function
+Pre Finish private method call
+Goodbye - private - Transformed
+Post Finish private method call
+Pre Start private method call - Transformed
+Hello - private - Transformed
+Post Start private method call - Transformed
+Not doing anything here
+Pre Finish private method call - Transformed
+Goodbye - private - Transformed
+Post Finish private method call - Transformed
diff --git a/test/915-obsolete-2/info.txt b/test/915-obsolete-2/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/915-obsolete-2/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/915-obsolete-2/run b/test/915-obsolete-2/run
new file mode 100755
index 0000000..bfe227f
--- /dev/null
+++ b/test/915-obsolete-2/run
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+ if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+ else
+ other_args=""
+ fi
+fi
+
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=915-obsolete-2,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/915-obsolete-2/src/Main.java b/test/915-obsolete-2/src/Main.java
new file mode 100644
index 0000000..bbeb726
--- /dev/null
+++ b/test/915-obsolete-2/src/Main.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // private void Start() {
+ // System.out.println("Hello - private - Transformed");
+ // }
+ //
+ // private void Finish() {
+ // System.out.println("Goodbye - private - Transformed");
+ // }
+ //
+ // public void sayHi(Runnable r) {
+ // System.out.println("Pre Start private method call - Transformed");
+ // Start();
+ // System.out.println("Post Start private method call - Transformed");
+ // r.run();
+ // System.out.println("Pre Finish private method call - Transformed");
+ // Finish();
+ // System.out.println("Post Finish private method call - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAMgoADgAZCQAaABsIABwKAB0AHggAHwgAIAoADQAhCAAiCwAjACQIACUKAA0AJggA" +
+ "JwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVTdGFydAEA" +
+ "BkZpbmlzaAEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7KVYBAApTb3VyY2VGaWxlAQAO" +
+ "VHJhbnNmb3JtLmphdmEMAA8AEAcAKgwAKwAsAQAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3Jt" +
+ "ZWQHAC0MAC4ALwEAH0dvb2RieWUgLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQBACtQcmUgU3RhcnQg" +
+ "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAATABABACxQb3N0IFN0YXJ0IHByaXZh" +
+ "dGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAcAMAwAMQAQAQAsUHJlIEZpbmlzaCBwcml2YXRl" +
+ "IG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQMABQAEAEALVBvc3QgRmluaXNoIHByaXZhdGUgbWV0" +
+ "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBq" +
+ "YXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9Q" +
+ "cmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABJqYXZhL2xhbmcv" +
+ "UnVubmFibGUBAANydW4AIAANAA4AAAAAAAQAAAAPABAAAQARAAAAHQABAAEAAAAFKrcAAbEAAAAB" +
+ "ABIAAAAGAAEAAAABAAIAEwAQAAEAEQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEAEgAAAAoAAgAA" +
+ "AAMACAAEAAIAFAAQAAEAEQAAACUAAgABAAAACbIAAhIFtgAEsQAAAAEAEgAAAAoAAgAAAAcACAAI" +
+ "AAEAFQAWAAEAEQAAAGMAAgACAAAAL7IAAhIGtgAEKrcAB7IAAhIItgAEK7kACQEAsgACEgq2AAQq" +
+ "twALsgACEgy2AASxAAAAAQASAAAAIgAIAAAACwAIAAwADAANABQADgAaAA8AIgAQACYAEQAuABIA" +
+ "AQAXAAAAAgAY");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCM0QYTJmX+NsZXkImojgSkJtXyuew3oaXcBAAAcAAAAHhWNBIAAAAAAAAAADwEAAAX" +
+ "AAAAcAAAAAcAAADMAAAAAwAAAOgAAAABAAAADAEAAAcAAAAUAQAAAQAAAEwBAABwAwAAbAEAAD4C" +
+ "AABGAgAATgIAAG8CAACOAgAAmwIAALICAADGAgAA3AIAAPACAAAEAwAAMwMAAGEDAACPAwAAvAMA" +
+ "AMMDAADTAwAA1gMAANoDAADuAwAA8wMAAPwDAAABBAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+ "EAAAABAAAAAGAAAAAAAAABEAAAAGAAAAMAIAABEAAAAGAAAAOAIAAAUAAQATAAAAAAAAAAAAAAAA" +
+ "AAAAAQAAAAAAAAAOAAAAAAABABYAAAABAAIAFAAAAAIAAAAAAAAAAwAAABUAAAAAAAAAAAAAAAIA" +
+ "AAAAAAAADwAAAAAAAAAmBAAAAAAAAAEAAQABAAAACAQAAAQAAABwEAUAAAAOAAMAAQACAAAADQQA" +
+ "AAkAAABiAAAAGwECAAAAbiAEABAADgAAAAMAAQACAAAAEwQAAAkAAABiAAAAGwEDAAAAbiAEABAA" +
+ "DgAAAAQAAgACAAAAGQQAACoAAABiAAAAGwENAAAAbiAEABAAcBACAAIAYgAAABsBCwAAAG4gBAAQ" +
+ "AHIQBgADAGIAAAAbAQwAAABuIAQAEABwEAEAAgBiAAAAGwEKAAAAbiAEABAADgABAAAAAwAAAAEA" +
+ "AAAEAAY8aW5pdD4ABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBUcmFuc2Zvcm1lZAAdSGVs" +
+ "bG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07ABVMamF2YS9pby9QcmludFN0" +
+ "cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AC1Qb3N0IEZpbmlzaCBwcml2YXRlIG1ldGhv" +
+ "ZCBjYWxsIC0gVHJhbnNmb3JtZWQALFBvc3QgU3RhcnQgcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRy" +
+ "YW5zZm9ybWVkACxQcmUgRmluaXNoIHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAr" +
+ "UHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAFU3RhcnQADlRyYW5z" +
+ "Zm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjEzAANvdXQAB3ByaW50bG4AA3J1bgAF" +
+ "c2F5SGkAAQAHDgAHAAcOhwADAAcOhwALAQAHDoc8hzyHPIcAAAADAQCAgATsAgEChAMBAqgDAwHM" +
+ "Aw0AAAAAAAAAAQAAAAAAAAABAAAAFwAAAHAAAAACAAAABwAAAMwAAAADAAAAAwAAAOgAAAAEAAAA" +
+ "AQAAAAwBAAAFAAAABwAAABQBAAAGAAAAAQAAAEwBAAABIAAABAAAAGwBAAABEAAAAgAAADACAAAC" +
+ "IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ t.sayHi(() -> {
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
diff --git a/test/915-obsolete-2/src/Transform.java b/test/915-obsolete-2/src/Transform.java
new file mode 100644
index 0000000..e914e29
--- /dev/null
+++ b/test/915-obsolete-2/src/Transform.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ private void Start() {
+ System.out.println("hello - private");
+ }
+
+ private void Finish() {
+ System.out.println("goodbye - private");
+ }
+
+ public void sayHi(Runnable r) {
+ System.out.println("Pre Start private method call");
+ Start();
+ System.out.println("Post Start private method call");
+ r.run();
+ System.out.println("Pre Finish private method call");
+ Finish();
+ System.out.println("Post Finish private method call");
+ }
+}
diff --git a/test/916-obsolete-jit/build b/test/916-obsolete-jit/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/916-obsolete-jit/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/916-obsolete-jit/expected.txt b/test/916-obsolete-jit/expected.txt
new file mode 100644
index 0000000..04aff3a
--- /dev/null
+++ b/test/916-obsolete-jit/expected.txt
@@ -0,0 +1,21 @@
+Pre Start private method call
+hello - private
+Post Start private method call
+Not doing anything here
+Pre Finish private method call
+goodbye - private
+Post Finish private method call
+Pre Start private method call
+hello - private
+Post Start private method call
+transforming calling function
+Pre Finish private method call
+Goodbye - private - Transformed
+Post Finish private method call
+Pre Start private method call - Transformed
+Hello - private - Transformed
+Post Start private method call - Transformed
+Not doing anything here
+Pre Finish private method call - Transformed
+Goodbye - private - Transformed
+Post Finish private method call - Transformed
diff --git a/test/916-obsolete-jit/info.txt b/test/916-obsolete-jit/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/916-obsolete-jit/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/916-obsolete-jit/run b/test/916-obsolete-jit/run
new file mode 100755
index 0000000..25c2c07
--- /dev/null
+++ b/test/916-obsolete-jit/run
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jit"* ]]; then
+ other_args=""
+else
+ other_args="--jit"
+fi
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+ if [[ "$@" != *"--debuggable"* ]]; then
+ other_args="$other_args -Xcompiler-option --debuggable "
+ fi
+fi
+
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=915-obsolete-2,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/916-obsolete-jit/src/Main.java b/test/916-obsolete-jit/src/Main.java
new file mode 100644
index 0000000..d4fa667
--- /dev/null
+++ b/test/916-obsolete-jit/src/Main.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+ private static boolean do_print = false;
+
+ public static boolean doPrint() {
+ return do_print;
+ }
+
+  // Since the transformed code isn't really the point here, this is the same as test 915.
+ // class Transform {
+ // private void Start() {
+ // System.out.println("Hello - private - Transformed");
+ // }
+ //
+ // private void Finish() {
+ // System.out.println("Goodbye - private - Transformed");
+ // }
+ //
+ // public void sayHi(Runnable r) {
+ // System.out.println("Pre Start private method call - Transformed");
+ // Start();
+ // System.out.println("Post Start private method call - Transformed");
+ // r.run();
+ // System.out.println("Pre Finish private method call - Transformed");
+ // Finish();
+ // System.out.println("Post Finish private method call - Transformed");
+ // }
+ // }
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAMgoADgAZCQAaABsIABwKAB0AHggAHwgAIAoADQAhCAAiCwAjACQIACUKAA0AJggA" +
+ "JwcAKAcAKQEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVTdGFydAEA" +
+ "BkZpbmlzaAEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7KVYBAApTb3VyY2VGaWxlAQAO" +
+ "VHJhbnNmb3JtLmphdmEMAA8AEAcAKgwAKwAsAQAdSGVsbG8gLSBwcml2YXRlIC0gVHJhbnNmb3Jt" +
+ "ZWQHAC0MAC4ALwEAH0dvb2RieWUgLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQBACtQcmUgU3RhcnQg" +
+ "cHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRyYW5zZm9ybWVkDAATABABACxQb3N0IFN0YXJ0IHByaXZh" +
+ "dGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAcAMAwAMQAQAQAsUHJlIEZpbmlzaCBwcml2YXRl" +
+ "IG1ldGhvZCBjYWxsIC0gVHJhbnNmb3JtZWQMABQAEAEALVBvc3QgRmluaXNoIHByaXZhdGUgbWV0" +
+ "aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAEACVRyYW5zZm9ybQEAEGphdmEvbGFuZy9PYmplY3QBABBq" +
+ "YXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9Q" +
+ "cmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABJqYXZhL2xhbmcv" +
+ "UnVubmFibGUBAANydW4AIAANAA4AAAAAAAQAAAAPABAAAQARAAAAHQABAAEAAAAFKrcAAbEAAAAB" +
+ "ABIAAAAGAAEAAAABAAIAEwAQAAEAEQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEAEgAAAAoAAgAA" +
+ "AAMACAAEAAIAFAAQAAEAEQAAACUAAgABAAAACbIAAhIFtgAEsQAAAAEAEgAAAAoAAgAAAAcACAAI" +
+ "AAEAFQAWAAEAEQAAAGMAAgACAAAAL7IAAhIGtgAEKrcAB7IAAhIItgAEK7kACQEAsgACEgq2AAQq" +
+ "twALsgACEgy2AASxAAAAAQASAAAAIgAIAAAACwAIAAwADAANABQADgAaAA8AIgAQACYAEQAuABIA" +
+ "AQAXAAAAAgAY");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCM0QYTJmX+NsZXkImojgSkJtXyuew3oaXcBAAAcAAAAHhWNBIAAAAAAAAAADwEAAAX" +
+ "AAAAcAAAAAcAAADMAAAAAwAAAOgAAAABAAAADAEAAAcAAAAUAQAAAQAAAEwBAABwAwAAbAEAAD4C" +
+ "AABGAgAATgIAAG8CAACOAgAAmwIAALICAADGAgAA3AIAAPACAAAEAwAAMwMAAGEDAACPAwAAvAMA" +
+ "AMMDAADTAwAA1gMAANoDAADuAwAA8wMAAPwDAAABBAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+ "EAAAABAAAAAGAAAAAAAAABEAAAAGAAAAMAIAABEAAAAGAAAAOAIAAAUAAQATAAAAAAAAAAAAAAAA" +
+ "AAAAAQAAAAAAAAAOAAAAAAABABYAAAABAAIAFAAAAAIAAAAAAAAAAwAAABUAAAAAAAAAAAAAAAIA" +
+ "AAAAAAAADwAAAAAAAAAmBAAAAAAAAAEAAQABAAAACAQAAAQAAABwEAUAAAAOAAMAAQACAAAADQQA" +
+ "AAkAAABiAAAAGwECAAAAbiAEABAADgAAAAMAAQACAAAAEwQAAAkAAABiAAAAGwEDAAAAbiAEABAA" +
+ "DgAAAAQAAgACAAAAGQQAACoAAABiAAAAGwENAAAAbiAEABAAcBACAAIAYgAAABsBCwAAAG4gBAAQ" +
+ "AHIQBgADAGIAAAAbAQwAAABuIAQAEABwEAEAAgBiAAAAGwEKAAAAbiAEABAADgABAAAAAwAAAAEA" +
+ "AAAEAAY8aW5pdD4ABkZpbmlzaAAfR29vZGJ5ZSAtIHByaXZhdGUgLSBUcmFuc2Zvcm1lZAAdSGVs" +
+ "bG8gLSBwcml2YXRlIC0gVHJhbnNmb3JtZWQAC0xUcmFuc2Zvcm07ABVMamF2YS9pby9QcmludFN0" +
+ "cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AC1Qb3N0IEZpbmlzaCBwcml2YXRlIG1ldGhv" +
+ "ZCBjYWxsIC0gVHJhbnNmb3JtZWQALFBvc3QgU3RhcnQgcHJpdmF0ZSBtZXRob2QgY2FsbCAtIFRy" +
+ "YW5zZm9ybWVkACxQcmUgRmluaXNoIHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAr" +
+ "UHJlIFN0YXJ0IHByaXZhdGUgbWV0aG9kIGNhbGwgLSBUcmFuc2Zvcm1lZAAFU3RhcnQADlRyYW5z" +
+ "Zm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjEzAANvdXQAB3ByaW50bG4AA3J1bgAF" +
+ "c2F5SGkAAQAHDgAHAAcOhwADAAcOhwALAQAHDoc8hzyHPIcAAAADAQCAgATsAgEChAMBAqgDAwHM" +
+ "Aw0AAAAAAAAAAQAAAAAAAAABAAAAFwAAAHAAAAACAAAABwAAAMwAAAADAAAAAwAAAOgAAAAEAAAA" +
+ "AQAAAAwBAAAFAAAABwAAABQBAAAGAAAAAQAAAEwBAAABIAAABAAAAGwBAAABEAAAAgAAADACAAAC" +
+ "IAAAFwAAAD4CAAADIAAABAAAAAgEAAAAIAAAAQAAACYEAAAAEAAAAQAAADwEAAA=");
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+  // TODO Workaround for (1) the inability to ensure that current_method is not put into a
+  // register by the JIT and/or (2) the inability to deoptimize frames near runtime functions.
+  // TODO Fix one or both of these issues.
+ public static void doCall(Runnable r) {
+ r.run();
+ }
+
+ private static boolean interpreting = true;
+ public static void doTest(Transform t) {
+ do_print = false;
+ Runnable noop = () -> {};
+ long j = 0;
+ do {
+ for (int i = 0; i < 10000; i++) {
+ t.sayHi(noop);
+ j++;
+ }
+ t.sayHi(() -> {
+      // TODO 2 is wrong: it is checking a piece of this lambda method, but that should be
+      // fine due to how we implement lambdas. Be sure to fix before really submitting.
+ interpreting = Main.isInterpretedFunction("LTransform;->sayHi()V");
+ });
+ if (j >= 1000000) {
+ System.out.println("FAIL: Could not make sayHi be Jitted!");
+ return;
+ }
+ j++;
+    } while (interpreting);
+ // Start printing.
+ do_print = true;
+ t.sayHi(() -> {
+ System.out.println("Not doing anything here");
+ });
+ t.sayHi(() -> {
+ if (Main.isInterpretedFunction("LTransform;->sayHi()V")) {
+ System.out.println("FAIL: sayHi is being interpreted!");
+ }
+ System.out.println("transforming calling function");
+ doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ });
+ t.sayHi(() -> { System.out.println("Not doing anything here"); });
+ }
+
+ private static native boolean isInterpretedFunction(String name);
+
+ // Transforms the class
+ private static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+}
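The structure of doTest above is worth calling out: whether sayHi is interpreted or JIT-compiled can only be observed while a sayHi frame is live on the stack, so every query is funneled through a Runnable that sayHi itself runs. A hedged distillation of that idiom (probe is hypothetical, not part of the test):

    // Observe a method's execution state from inside one of its own invocations.
    private static boolean probe(Transform t) {
      final boolean[] interpreted = new boolean[1];
      t.sayHi(() -> {
        // A sayHi frame is on the stack right now, so the walker can find it.
        interpreted[0] = Main.isInterpretedFunction("LTransform;->sayHi()V");
      });
      return interpreted[0];
    }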
diff --git a/test/916-obsolete-jit/src/Transform.java b/test/916-obsolete-jit/src/Transform.java
new file mode 100644
index 0000000..a22f389
--- /dev/null
+++ b/test/916-obsolete-jit/src/Transform.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ private void Start() {
+ if (Main.doPrint()) {
+ System.out.println("hello - private");
+ }
+ }
+
+ private void Finish() {
+ if (Main.doPrint()) {
+ System.out.println("goodbye - private");
+ }
+ }
+
+ public void sayHi(Runnable r) {
+ if (Main.doPrint()) {
+ System.out.println("Pre Start private method call");
+ }
+ Start();
+ if (Main.doPrint()) {
+ System.out.println("Post Start private method call");
+ }
+    // TODO Revisit with b/33616143
+ // TODO Uncomment this
+ // r.run();
+    // TODO This is a very temporary fix until we get deoptimization near runtime frames
+    // working, force the current method to always be read from the stack, or both.
+ Main.doCall(r);
+ if (Main.doPrint()) {
+ System.out.println("Pre Finish private method call");
+ }
+ Finish();
+ if (Main.doPrint()) {
+ System.out.println("Post Finish private method call");
+ }
+ }
+}
diff --git a/test/Android.bp b/test/Android.bp
index 2625f56..c42ffc9 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -243,6 +243,8 @@
name: "libtiagent-defaults",
defaults: ["libartagent-defaults"],
srcs: [
+ // This is to get the IsInterpreted native method.
+ "common/stack_inspect.cc",
"ti-agent/common_load.cc",
"ti-agent/common_helper.cc",
"901-hello-ti-agent/basics.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index b515130..2b620b5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -280,6 +280,9 @@
911-get-stack-trace \
912-classes \
913-heaps \
+ 914-hello-obsolescence \
+ 915-obsolete-2 \
+ 916-obsolete-jit \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 4df2d47..eefaabc 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -52,6 +52,53 @@
return IsInterpreted(env, klass, 1);
}
+// public static native boolean isInterpretedAt(int depth);
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedAt(JNIEnv* env,
+ jclass klass,
+ jint depth) {
+ return IsInterpreted(env, klass, depth);
+}
+
+
+// public static native boolean isInterpretedFunction(String smali);
+
+struct MethodIsInterpretedVisitor : public StackVisitor {
+ public:
+ MethodIsInterpretedVisitor(Thread* thread, std::string shorty)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ shorty_(shorty),
+ method_is_interpreted_(false) {}
+
+ virtual bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!GetMethod()->IsRuntimeMethod() && shorty_ == GetMethod()->GetShorty()) {
+ method_is_interpreted_ = IsShadowFrame();
+ return false;
+ }
+ return true;
+ }
+
+ bool IsInterpreted() {
+ return method_is_interpreted_;
+ }
+
+ private:
+ const std::string shorty_;
+ bool method_is_interpreted_;
+};
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jstring name) {
+ if (Runtime::Current() == nullptr) {
+ return JNI_TRUE;
+ }
+ ScopedObjectAccess soa(env);
+ MethodIsInterpretedVisitor v(soa.Self(), soa.Decode<mirror::String>(name)->ToModifiedUtf8());
+ v.WalkStack();
+ return v.IsInterpreted();
+}
+
// public static native void assertIsInterpreted();
extern "C" JNIEXPORT void JNICALL Java_Main_assertIsInterpreted(JNIEnv* env, jclass klass) {
diff --git a/test/etc/default-build b/test/etc/default-build
index 51ae175..faa0813 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -69,6 +69,7 @@
# Setup experimental flag mappings in a bash associative array.
declare -A JACK_EXPERIMENTAL_ARGS
+JACK_EXPERIMENTAL_ARGS["agents"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["default-methods"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["lambdas"]="-D jack.java.source.version=1.8 -D jack.android.min-api-level=24"
JACK_EXPERIMENTAL_ARGS["method-handles"]="-D jack.java.source.version=1.7 -D jack.android.min-api-level=o-b1"
@@ -76,12 +77,14 @@
declare -A SMALI_EXPERIMENTAL_ARGS
SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api-level 24"
SMALI_EXPERIMENTAL_ARGS["method-handles"]="--api-level 26"
+SMALI_EXPERIMENTAL_ARGS["agents"]="--api-level 26"
declare -A JAVAC_EXPERIMENTAL_ARGS
JAVAC_EXPERIMENTAL_ARGS["default-methods"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS["lambdas"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS["method-handles"]="-source 1.8 -target 1.8"
JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.7 -target 1.7"
+JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
while true; do
if [ "x$1" = "x--dx-option" ]; then
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 3e2b168..ebf1e46 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -18,8 +18,11 @@
#include <stdio.h>
+#include "art_method.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
+#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "ti-agent/common_load.h"
#include "utils.h"
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 2795cbc..826c5ad 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -66,6 +66,8 @@
{ "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
{ "912-classes", Test912Classes::OnLoad, nullptr },
{ "913-heaps", Test913Heaps::OnLoad, nullptr },
+ { "914-hello-obsolescence", common_redefine::OnLoad, nullptr },
+ { "915-obsolete-2", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {